-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 2
-rwxr-xr-x  Documentation/target/tcm_mod_builder.py | 49
-rw-r--r--  Documentation/thermal/cpu-cooling-api.txt | 15
-rw-r--r--  MAINTAINERS | 27
-rw-r--r--  Makefile | 3
-rw-r--r--  arch/arm/boot/dts/imx6sx-sdb.dts | 15
-rw-r--r--  arch/arm/boot/dts/vf610-twr.dts | 15
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/arm/kernel/calls.S | 1
-rw-r--r--  arch/arm/kernel/perf_regs.c | 8
-rw-r--r--  arch/arm/mm/dump.c | 9
-rw-r--r--  arch/arm/mm/init.c | 4
-rw-r--r--  arch/arm/mm/mmu.c | 4
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 1
-rw-r--r--  arch/arm64/include/asm/cpu.h | 5
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 2
-rw-r--r--  arch/arm64/include/asm/processor.h | 4
-rw-r--r--  arch/arm64/include/asm/unistd.h | 2
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 10
-rw-r--r--  arch/arm64/kernel/efi.c | 2
-rw-r--r--  arch/arm64/kernel/module.c | 1
-rw-r--r--  arch/arm64/kernel/perf_regs.c | 8
-rw-r--r--  arch/arm64/kernel/setup.c | 1
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 1
-rw-r--r--  arch/arm64/kvm/hyp.S | 1
-rw-r--r--  arch/arm64/kvm/reset.c | 1
-rw-r--r--  arch/blackfin/mach-bf533/boards/stamp.c | 1
-rw-r--r--  arch/ia64/kernel/acpi.c | 9
-rw-r--r--  arch/m68k/include/asm/unistd.h | 2
-rw-r--r--  arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/m68k/kernel/syscalltable.S | 1
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 13
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 1
-rw-r--r--  arch/s390/hypfs/hypfs_vm.c | 2
-rw-r--r--  arch/s390/include/asm/irqflags.h | 2
-rw-r--r--  arch/s390/include/asm/timex.h | 10
-rw-r--r--  arch/s390/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/s390/kernel/syscalls.S | 1
-rw-r--r--  arch/s390/kernel/uprobes.c | 69
-rw-r--r--  arch/s390/kernel/vtime.c | 2
-rw-r--r--  arch/s390/mm/pgtable.c | 5
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 8
-rw-r--r--  arch/x86/boot/Makefile | 1
-rw-r--r--  arch/x86/crypto/Makefile | 2
-rw-r--r--  arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 46
-rw-r--r--  arch/x86/include/asm/vgtod.h | 6
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 9
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 1
-rw-r--r--  arch/x86/kernel/cpu/mkcapflags.sh | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.h | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 17
-rw-r--r--  arch/x86/kernel/perf_regs.c | 90
-rw-r--r--  arch/x86/lib/insn.c | 2
-rw-r--r--  arch/x86/mm/init.c | 37
-rw-r--r--  arch/x86/vdso/vma.c | 45
-rw-r--r--  arch/x86/xen/enlighten.c | 22
-rw-r--r--  arch/x86/xen/p2m.c | 20
-rw-r--r--  arch/x86/xen/setup.c | 42
-rw-r--r--  arch/x86/xen/time.c | 18
-rw-r--r--  block/blk-core.c | 21
-rw-r--r--  block/blk-mq-tag.c | 14
-rw-r--r--  block/blk-mq-tag.h | 1
-rw-r--r--  block/blk-mq.c | 75
-rw-r--r--  block/blk-mq.h | 1
-rw-r--r--  block/blk-timeout.c | 3
-rw-r--r--  drivers/Makefile | 6
-rw-r--r--  drivers/acpi/acpi_processor.c | 25
-rw-r--r--  drivers/acpi/device_pm.c | 2
-rw-r--r--  drivers/acpi/int340x_thermal.c | 11
-rw-r--r--  drivers/acpi/processor_core.c | 56
-rw-r--r--  drivers/acpi/scan.c | 13
-rw-r--r--  drivers/acpi/video.c | 10
-rw-r--r--  drivers/block/null_blk.c | 2
-rw-r--r--  drivers/block/nvme-core.c | 175
-rw-r--r--  drivers/block/virtio_blk.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 3
-rw-r--r--  drivers/gpio/gpio-dln2.c | 156
-rw-r--r--  drivers/gpio/gpio-grgpio.c | 3
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 316
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/event.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/notify.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nve0.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c | 65
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 2
-rw-r--r--  drivers/hid/Kconfig | 3
-rw-r--r--  drivers/hid/hid-core.c | 1
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-input.c | 3
-rw-r--r--  drivers/hid/hid-kye.c | 4
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 16
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 41
-rw-r--r--  drivers/hid/hid-roccat-pyra.c | 8
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 5
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/iommu/intel-iommu.c | 12
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 6
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 1
-rw-r--r--  drivers/isdn/hardware/eicon/message.c | 2
-rw-r--r--  drivers/leds/leds-netxbig.c | 12
-rw-r--r--  drivers/misc/cxl/context.c | 82
-rw-r--r--  drivers/misc/cxl/file.c | 14
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 25
-rw-r--r--  drivers/mmc/host/sdhci-pci.h | 3
-rw-r--r--  drivers/mmc/host/sdhci-pxav3.c | 15
-rw-r--r--  drivers/mmc/host/sdhci.c | 80
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 4
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 15
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 23
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 6
-rw-r--r--  drivers/net/ethernet/dnet.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 10
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 104
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 9
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 1
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 9
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 5
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 30
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 10
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 1
-rw-r--r--  drivers/net/team/team.c | 16
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 10
-rw-r--r--  drivers/net/usb/r8152.c | 17
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fw-file.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 32
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 1
-rw-r--r--  drivers/net/xen-netfront.c | 71
-rw-r--r--  drivers/pinctrl/pinctrl-rockchip.c | 57
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 5
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 12
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 4
-rw-r--r--  drivers/target/target_core_device.c | 54
-rw-r--r--  drivers/target/target_core_file.c | 12
-rw-r--r--  drivers/target/target_core_iblock.c | 3
-rw-r--r--  drivers/target/target_core_pr.c | 12
-rw-r--r--  drivers/target/target_core_rd.c | 1
-rw-r--r--  drivers/target/target_core_sbc.c | 15
-rw-r--r--  drivers/target/target_core_spc.c | 5
-rw-r--r--  drivers/target/target_core_user.c | 1
-rw-r--r--  drivers/thermal/imx_thermal.c | 2
-rw-r--r--  drivers/thermal/int340x_thermal/acpi_thermal_rel.c | 16
-rw-r--r--  drivers/thermal/int340x_thermal/processor_thermal_device.c | 2
-rw-r--r--  drivers/thermal/of-thermal.c | 2
-rw-r--r--  drivers/thermal/rcar_thermal.c | 17
-rw-r--r--  drivers/thermal/thermal_core.h | 4
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 4
-rw-r--r--  drivers/vhost/net.c | 2
-rw-r--r--  drivers/vhost/scsi.c | 24
-rw-r--r--  drivers/video/fbdev/broadsheetfb.c | 8
-rw-r--r--  drivers/video/fbdev/simplefb.c | 2
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 10
-rw-r--r--  drivers/virtio/virtio_pci_common.h | 1
-rw-r--r--  drivers/virtio/virtio_pci_legacy.c | 12
-rw-r--r--  fs/btrfs/backref.c | 13
-rw-r--r--  fs/btrfs/delayed-inode.c | 8
-rw-r--r--  fs/btrfs/extent-tree.c | 12
-rw-r--r--  fs/btrfs/inode.c | 4
-rw-r--r--  fs/btrfs/scrub.c | 2
-rw-r--r--  fs/ceph/addr.c | 2
-rw-r--r--  fs/ceph/locks.c | 64
-rw-r--r--  fs/ceph/mds_client.c | 4
-rw-r--r--  fs/cifs/file.c | 34
-rw-r--r--  fs/ext4/extents.c | 4
-rw-r--r--  fs/ext4/file.c | 220
-rw-r--r--  fs/ext4/resize.c | 24
-rw-r--r--  fs/ext4/super.c | 2
-rw-r--r--  fs/fcntl.c | 5
-rw-r--r--  fs/fuse/dev.c | 51
-rw-r--r--  fs/fuse/dir.c | 31
-rw-r--r--  fs/fuse/fuse_i.h | 2
-rw-r--r--  fs/fuse/inode.c | 5
-rw-r--r--  fs/inode.c | 3
-rw-r--r--  fs/lockd/svcsubs.c | 26
-rw-r--r--  fs/locks.c | 569
-rw-r--r--  fs/nfs/delegation.c | 23
-rw-r--r--  fs/nfs/nfs4state.c | 70
-rw-r--r--  fs/nfs/pagelist.c | 6
-rw-r--r--  fs/nfs/write.c | 41
-rw-r--r--  fs/nfsd/nfs4state.c | 21
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 10
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 5
-rw-r--r--  fs/ocfs2/namei.c | 43
-rw-r--r--  fs/read_write.c | 2
-rw-r--r--  include/acpi/processor.h | 8
-rw-r--r--  include/asm-generic/tlb.h | 8
-rw-r--r--  include/linux/acpi.h | 4
-rw-r--r--  include/linux/blk-mq.h | 8
-rw-r--r--  include/linux/blk_types.h | 2
-rw-r--r--  include/linux/ceph/osd_client.h | 4
-rw-r--r--  include/linux/compiler.h | 12
-rw-r--r--  include/linux/fs.h | 54
-rw-r--r--  include/linux/kdb.h | 62
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/mmc/sdhci.h | 1
-rw-r--r--  include/linux/netdevice.h | 6
-rw-r--r--  include/linux/perf_event.h | 12
-rw-r--r--  include/linux/perf_regs.h | 16
-rw-r--r--  include/linux/rmap.h | 10
-rw-r--r--  include/linux/writeback.h | 1
-rw-r--r--  include/net/mac80211.h | 7
-rw-r--r--  include/target/target_core_backend.h | 1
-rw-r--r--  include/target/target_core_backend_configfs.h | 2
-rw-r--r--  include/target/target_core_base.h | 3
-rw-r--r--  include/uapi/asm-generic/fcntl.h | 2
-rw-r--r--  include/uapi/linux/kfd_ioctl.h | 37
-rw-r--r--  include/uapi/linux/openvswitch.h | 4
-rw-r--r--  include/xen/interface/nmi.h | 51
-rw-r--r--  kernel/debug/debug_core.c | 52
-rw-r--r--  kernel/debug/kdb/kdb_bp.c | 37
-rw-r--r--  kernel/debug/kdb/kdb_debugger.c | 4
-rw-r--r--  kernel/debug/kdb/kdb_main.c | 267
-rw-r--r--  kernel/debug/kdb/kdb_private.h | 3
-rw-r--r--  kernel/events/core.c | 19
-rw-r--r--  kernel/exit.c | 12
-rw-r--r--  kernel/locking/mutex-debug.c | 2
-rw-r--r--  kernel/sched/core.c | 13
-rw-r--r--  kernel/sched/deadline.c | 25
-rw-r--r--  kernel/sched/fair.c | 6
-rw-r--r--  kernel/trace/trace_kdb.c | 4
-rw-r--r--  lib/Kconfig.kgdb | 25
-rw-r--r--  lib/assoc_array.c | 1
-rw-r--r--  mm/Kconfig.debug | 9
-rw-r--r--  mm/memcontrol.c | 17
-rw-r--r--  mm/memory.c | 39
-rw-r--r--  mm/mmap.c | 13
-rw-r--r--  mm/page-writeback.c | 43
-rw-r--r--  mm/rmap.c | 42
-rw-r--r--  mm/vmscan.c | 24
-rw-r--r--  net/batman-adv/multicast.c | 11
-rw-r--r--  net/batman-adv/network-coding.c | 2
-rw-r--r--  net/batman-adv/originator.c | 7
-rw-r--r--  net/batman-adv/routing.c | 6
-rw-r--r--  net/bridge/br_input.c | 3
-rw-r--r--  net/ceph/auth_x.c | 2
-rw-r--r--  net/ceph/mon_client.c | 2
-rw-r--r--  net/core/neighbour.c | 44
-rw-r--r--  net/ipv4/netfilter/nft_redir_ipv4.c | 8
-rw-r--r--  net/ipv4/tcp_output.c | 4
-rw-r--r--  net/ipv6/netfilter/nft_redir_ipv6.c | 8
-rw-r--r--  net/mac80211/key.c | 12
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 20
-rw-r--r--  net/netfilter/nf_tables_api.c | 14
-rw-r--r--  net/netfilter/nfnetlink.c | 5
-rw-r--r--  net/netfilter/nft_nat.c | 8
-rw-r--r--  net/openvswitch/datapath.c | 3
-rw-r--r--  net/openvswitch/flow.c | 5
-rw-r--r--  net/openvswitch/vport.c | 2
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/tipc/bcast.c | 5
-rw-r--r--  scripts/Makefile.clean | 16
-rw-r--r--  security/keys/gc.c | 4
-rw-r--r--  sound/firewire/fireworks/fireworks_transaction.c | 2
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 2
-rw-r--r--  sound/pci/hda/patch_sigmatel.c | 4
-rw-r--r--  sound/usb/caiaq/audio.c | 2
-rw-r--r--  tools/lib/lockdep/preload.c | 4
-rw-r--r--  tools/perf/builtin-annotate.c | 2
-rw-r--r--  tools/perf/builtin-diff.c | 46
-rw-r--r--  tools/perf/builtin-list.c | 13
-rw-r--r--  tools/perf/builtin-report.c | 24
-rw-r--r--  tools/perf/builtin-top.c | 4
-rw-r--r--  tools/perf/tests/hists_cumulate.c | 66
-rw-r--r--  tools/perf/tests/hists_filter.c | 2
-rw-r--r--  tools/perf/tests/hists_output.c | 10
-rw-r--r--  tools/perf/ui/browsers/hists.c | 2
-rw-r--r--  tools/perf/ui/hist.c | 3
-rw-r--r--  tools/perf/ui/tui/setup.c | 26
-rw-r--r--  tools/perf/util/callchain.c | 30
-rw-r--r--  tools/perf/util/callchain.h | 2
-rw-r--r--  tools/perf/util/hist.c | 18
-rw-r--r--  tools/perf/util/hist.h | 2
-rw-r--r--  tools/perf/util/probe-event.c | 6
-rw-r--r--  tools/perf/util/probe-finder.c | 18
-rw-r--r--  tools/testing/selftests/exec/execveat.c | 19
-rw-r--r--  tools/testing/selftests/mqueue/mq_perf_tests.c | 3
-rw-r--r--  tools/testing/selftests/vm/Makefile | 2
328 files changed, 3749 insertions(+), 2117 deletions(-)
diff --git a/.mailmap b/.mailmap
index ada8ad696b2e..d357e1bd2a43 100644
--- a/.mailmap
+++ b/.mailmap
@@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
+Henrik Rydberg <rydberg@bitmath.org>
Herbert Xu <herbert@gondor.apana.org.au>
Jacob Shin <Jacob.Shin@amd.com>
James Bottomley <jejb@mulgrave.(none)>
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9bffdfc648dc..85b022179104 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
route/max_size - INTEGER
Maximum number of routes allowed in the kernel. Increase
this when using large numbers of interfaces and/or routes.
+ From linux kernel 3.6 onwards, this is deprecated for ipv4
+ as route cache is no longer used.
neigh/default/gc_thresh1 - INTEGER
Minimum number of entries to keep. Garbage collector will not
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 230ce71f4d75..2b47704f75cb 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
- buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
- buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
- buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
- buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
+ buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
- buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+ buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
- buf += " return \"" + fabric_mod_name[4:] + "\";\n"
+ buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
- if re.search('stop_session\)\(', fo):
- buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
- if re.search('fall_back_to_erl0\)\(', fo):
- buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
- if re.search('sess_logged_in\)\(', fo):
- buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
- buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+ buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
- buf += " return 0;\n"
+ buf += " return;\n"
buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+ bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
- if re.search('is_state_remove\)\(', fo):
- buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+ if re.search('aborted_task\)\(', fo):
+ buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
- buf += " return 0;\n"
+ buf += " return;\n"
buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+ bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
- input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+ input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
- input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+ input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt
index fca24c931ec8..753e47cc2e20 100644
--- a/Documentation/thermal/cpu-cooling-api.txt
+++ b/Documentation/thermal/cpu-cooling-api.txt
@@ -3,7 +3,7 @@ CPU cooling APIs How To
Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>
-Updated: 12 May 2012
+Updated: 6 Jan 2015
Copyright (c) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
@@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer.
clip_cpus: cpumask of cpus where the frequency constraints will happen.
-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+ struct device_node *np, const struct cpumask *clip_cpus)
+
+ This interface function registers the cpufreq cooling device with
+ the name "thermal-cpufreq-%x" linking it with a device tree node, in
+ order to bind it via the thermal DT code. This api can support multiple
+ instances of cpufreq cooling devices.
+
+ np: pointer to the cooling device device tree node
+ clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
This interface function unregisters the "thermal-cpufreq-%x" cooling device.
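For reference, a minimal sketch (not part of this patch) of how a platform thermal driver might pair the two calls documented above. The probe/remove skeleton and the "cpu-cooling" child-node lookup are hypothetical; the registration signatures come from the documentation hunk itself.

#include <linux/cpu_cooling.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static struct thermal_cooling_device *example_cdev;

static int example_probe(struct platform_device *pdev)
{
	/* hypothetical child node carrying the cooling properties */
	struct device_node *np = of_get_child_by_name(pdev->dev.of_node,
						      "cpu-cooling");

	/* register one DT-bound cooling device covering all present CPUs */
	example_cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
	of_node_put(np);
	if (IS_ERR(example_cdev))
		return PTR_ERR(example_cdev);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	cpufreq_cooling_unregister(example_cdev);
	return 0;
}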
diff --git a/MAINTAINERS b/MAINTAINERS
index ddb9ac8d32b3..ab6610e6c610 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -724,15 +724,15 @@ F: include/uapi/linux/apm_bios.h
F: drivers/char/apm-emulation.c
APPLE BCM5974 MULTITOUCH DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
-S: Maintained
+S: Odd fixes
F: drivers/input/mouse/bcm5974.c
APPLE SMC DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: lm-sensors@lm-sensors.org
-S: Maintained
+S: Odd fixes
F: drivers/hwmon/applesmc.c
APPLETALK NETWORK LAYER
@@ -2259,6 +2259,7 @@ F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
M: Chris Mason <clm@fb.com>
M: Josef Bacik <jbacik@fb.com>
+M: David Sterba <dsterba@suse.cz>
L: linux-btrfs@vger.kernel.org
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -4748,7 +4749,7 @@ S: Supported
F: drivers/scsi/ipr.*
IBM Power Virtual Ethernet Device Driver
-M: Santiago Leon <santil@linux.vnet.ibm.com>
+M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*
@@ -4940,10 +4941,10 @@ F: include/uapi/linux/input.h
F: include/linux/input/
INPUT MULTITOUCH (MT) PROTOCOL
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
-S: Maintained
+S: Odd fixes
F: Documentation/input/multi-touch-protocol.txt
F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_
@@ -5279,6 +5280,15 @@ W: www.open-iscsi.org
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/ulp/iser/
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M: Sagi Grimberg <sagig@mellanox.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L: linux-rdma@vger.kernel.org
+L: target-devel@vger.kernel.org
+S: Supported
+W: http://www.linux-iscsi.org
+F: drivers/infiniband/ulp/isert
+
ISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -9533,7 +9543,8 @@ F: drivers/platform/x86/thinkpad_acpi.c
TI BANDGAP AND THERMAL DRIVER
M: Eduardo Valentin <edubezval@gmail.com>
L: linux-pm@vger.kernel.org
-S: Supported
+L: linux-omap@vger.kernel.org
+S: Maintained
F: drivers/thermal/ti-soc-thermal/
TI CLOCK DRIVER
diff --git a/Makefile b/Makefile
index eb4eca56843a..e41a3356abee 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Diseased Newt
# *DOCUMENTATION*
@@ -391,6 +391,7 @@ USERINCLUDE := \
# Needed to be compatible with the O= option
LINUXINCLUDE := \
-I$(srctree)/arch/$(hdr-arch)/include \
+ -Iarch/$(hdr-arch)/include/generated/uapi \
-Iarch/$(hdr-arch)/include/generated \
$(if $(KBUILD_SRC), -I$(srctree)/include) \
-Iinclude \
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 1e6e5cc1c14c..8c1febd7e3f2 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -159,13 +159,28 @@
pinctrl-0 = <&pinctrl_enet1>;
phy-supply = <&reg_enet_3v3>;
phy-mode = "rgmii";
+ phy-handle = <&ethphy1>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy2: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
};
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
+ phy-handle = <&ethphy2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts
index a0f762159cb2..f2b64b1b00fa 100644
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
@@ -129,13 +129,28 @@
&fec0 {
phy-mode = "rmii";
+ phy-handle = <&ethphy0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec0>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
};
&fec1 {
phy-mode = "rmii";
+ phy-handle = <&ethphy1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
status = "okay";
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 705bb7620673..0c3f5a0dafd3 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -413,6 +413,7 @@
#define __NR_getrandom (__NR_SYSCALL_BASE+384)
#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
#define __NR_bpf (__NR_SYSCALL_BASE+386)
+#define __NR_execveat (__NR_SYSCALL_BASE+387)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index e51833f8cc38..05745eb838c5 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -396,6 +396,7 @@
CALL(sys_getrandom)
/* 385 */ CALL(sys_memfd_create)
CALL(sys_bpf)
+ CALL(sys_execveat)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
index 6e4379c67cbc..592dda3f21ff 100644
--- a/arch/arm/kernel/perf_regs.c
+++ b/arch/arm/kernel/perf_regs.c
@@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 59424937e52b..9fe8e241335c 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
- if (addr < USER_PGTABLES_CEILING)
- return;
-
if (!st->level) {
st->level = level;
st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
pgd_t *pgd = swapper_pg_dir;
struct pg_state st;
unsigned long addr;
- unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+ unsigned i;
memset(&st, 0, sizeof(st));
st.seq = m;
st.marker = address_markers;
- pgd += pgdoff;
-
- for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+ for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
walk_pud(&st, pgd, addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 98ad9c79ea0e..2495c8cb47ba 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
- .mask = ~PMD_SECT_RDONLY,
- .prot = PMD_SECT_RDONLY,
+ .mask = ~L_PMD_SECT_RDONLY,
+ .prot = L_PMD_SECT_RDONLY,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cda7c40999b6..4e6ef896c619 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
struct memblock_region *reg;
- unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
- unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b1fa4e614718..fbe0ca31a99c 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -21,6 +21,7 @@
#include <asm/barrier.h>
+#include <linux/bug.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index ace70682499b..8e797b2fcc01 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
+ u32 reg_id_dfr0;
u32 reg_id_isar0;
u32 reg_id_isar1;
u32 reg_id_isar2;
@@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
u32 reg_id_mmfr3;
u32 reg_id_pfr0;
u32 reg_id_pfr1;
+
+ u32 reg_mvfr0;
+ u32 reg_mvfr1;
+ u32 reg_mvfr2;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 8127e45e2637..865a7e28ea2d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+ vcpu->arch.hcr_el2 &= ~HCR_RW;
}
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 286b1bec547c..f9be30ea1cbd 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -31,6 +31,7 @@
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
+#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -123,9 +124,6 @@ struct task_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 49c9aefd24a5..b780c6c76eec 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 386
+#define __NR_compat_syscalls 387
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 57b641747534..07d435cf2eea 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
* If we have AArch32, we care about 32-bit features for compat. These
* registers should be RES0 otherwise.
*/
+ diff |= CHECK(id_dfr0, boot, cur, cpu);
diff |= CHECK(id_isar0, boot, cur, cpu);
diff |= CHECK(id_isar1, boot, cur, cpu);
diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
diff |= CHECK(id_pfr0, boot, cur, cpu);
diff |= CHECK(id_pfr1, boot, cur, cpu);
+ diff |= CHECK(mvfr0, boot, cur, cpu);
+ diff |= CHECK(mvfr1, boot, cur, cpu);
+ diff |= CHECK(mvfr2, boot, cur, cpu);
+
/*
* Mismatched CPU features are a recipe for disaster. Don't even
* pretend to support them.
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+
cpuinfo_detect_icache_policy(info);
check_local_cpu_errata();
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 6fac253bc783..2bb4347d0edf 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
efi_setup_idmap();
+ early_memunmap(memmap.map, memmap.map_end - memmap.map);
}
static int __init remap_region(efi_memory_desc_t *md, void **new)
@@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
}
mapsize = memmap.map_end - memmap.map;
- early_memunmap(memmap.map, mapsize);
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index fd027b101de5..9b6f71db2709 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,7 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 6762ad705587..3f62b35fb6f1 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
else
return PERF_SAMPLE_REGS_ABI_64;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b80991166754..20fe2932ad0c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
request_standard_resources();
efi_idmap_init();
+ early_ioremap_reset();
unflatten_device_tree();
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 4f93c67e63de..14944e5b28da 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/cputype.h>
+#include <asm/io.h>
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909fb0a1a..c3ca89c27c6b 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
+ lsr x1, x1, #12
tlbi ipas2e1is, x1
/*
* We have to ensure completion of the invalidation at Stage-2,
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 70a7816535cd..0b4326578985 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (!cpu_has_32bit_el1())
return -EINVAL;
cpu_reset = &default_regs_reset32;
- vcpu->arch.hcr_el2 &= ~HCR_RW;
} else {
cpu_reset = &default_regs_reset;
}
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 6f4bac969bf7..23eada79439c 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -7,6 +7,7 @@
*/
#include <linux/device.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 615ef81def49..e795cb848154 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
{
ia64_cpu_to_sapicid[cpu] = -1;
set_cpu_present(cpu, false);
@@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
return (0);
}
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 75e75d7b1702..244e0dbe45db 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 355
+#define NR_syscalls 356
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 2c1bec9a14b6..61fb6cb9d2ae 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -360,5 +360,6 @@
#define __NR_getrandom 352
#define __NR_memfd_create 353
#define __NR_bpf 354
+#define __NR_execveat 355
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 2ca219e184cd..a0ec4303f2c8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
.long sys_getrandom
.long sys_memfd_create
.long sys_bpf
+ .long sys_execveat /* 355 */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ebc4f165690a..0be6c681cab1 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -23,9 +23,9 @@
#define THREAD_SIZE (1 << THREAD_SHIFT)
#ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
#else
-#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
#endif
#ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
/* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
static inline struct thread_info *current_thread_info(void)
{
- /* gcc4, at least, is smart enough to turn this into a single
- * rlwinm for ppc32 and clrrdi for ppc64 */
- return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+ unsigned long val;
+
+ asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+ return (struct thread_info *)val;
}
#endif /* __ASSEMBLY__ */
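The stringify_in_c() wrapper is what lets a single macro serve both assembly and C: in .S files it expands to the raw mnemonic, while in C it expands to a string literal that can be pasted straight into an inline-asm template, as the new current_thread_info() above does. A minimal sketch of the mechanism follows; the helper definitions are assumptions modelled on the kernel's asm-compat.h pattern, and the THREAD_SHIFT value is hypothetical.

/* assumed helper definitions, for illustration only */
#ifdef __ASSEMBLY__
#define stringify_in_c(...)	__VA_ARGS__
#else
#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)
#define stringify_in_c(...)	__stringify(__VA_ARGS__) " "
#endif

#define THREAD_SHIFT	14	/* hypothetical value */
#define CURRENT_THREAD_INFO(dest, sp) \
	stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)

/* In C, CURRENT_THREAD_INFO(%0, 1) expands to the string literal
 * "clrrdi %0, 1, 14 ", which the compiler concatenates into the asm()
 * template; in a .S file the same macro emits the bare instruction.
 */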
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 54eca8b3b288..0509bca5e830 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
ld r12,opal_tracepoint_refcount@toc(r2); \
- std r12,32(r1); \
cmpdi r12,0; \
bne- LABEL; \
1:
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 32040ace00ea..afbe07907c10 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -231,7 +231,7 @@ failed:
struct dbfs_d2fc_hdr {
u64 len; /* Length of d2fc buffer without header */
u16 version; /* Version of header */
- char tod_ext[16]; /* TOD clock for d2fc */
+ char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
u64 count; /* Number of VM guests in d2fc buffer */
char reserved[30];
} __attribute__ ((packed));
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 37b9091ab8c0..16aa0c779e07 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
static inline notrace unsigned long arch_local_save_flags(void)
{
- return __arch_local_irq_stosm(0x00);
+ return __arch_local_irq_stnsm(0xff);
}
static inline notrace unsigned long arch_local_irq_save(void)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8beee1cceba4..98eb2a579223 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
set_clock_comparator(S390_lowcore.clock_comparator);
}
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
typedef unsigned long long cycles_t;
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
{
- typedef struct { char _[sizeof(clk)]; } addrtype;
+ typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
}
static inline unsigned long long get_tod_clock(void)
{
- unsigned char clk[16];
+ unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
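The timex.h change above fixes a classic C pitfall: an array used as a function parameter decays to a pointer, so the old sizeof(clk) inside get_tod_clock_ext(char clk[16]) measured a pointer rather than the 16-byte buffer, and the "stcke" memory constraint covered too little. A stand-alone user-space sketch of the pitfall (not kernel code):

#include <stdio.h>

static void takes_array(char clk[16])
{
	/* "clk" is really a char *, so this prints sizeof(char *),
	 * typically 8 on 64-bit targets -- not 16. */
	printf("inside callee: %zu\n", sizeof(clk));
}

int main(void)
{
	char buf[16];

	printf("at definition: %zu\n", sizeof(buf));	/* 16 */
	takes_array(buf);
	return 0;
}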
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 2b446cf0cc65..67878af257a0 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -289,7 +289,8 @@
#define __NR_bpf 351
#define __NR_s390_pci_mmio_write 352
#define __NR_s390_pci_mmio_read 353
-#define NR_syscalls 354
+#define __NR_execveat 354
+#define NR_syscalls 355
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a2987243bc76..939ec474b1dd 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index f6b3cd056ec2..cc7328080b60 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
return false;
}
+static int check_per_event(unsigned short cause, unsigned long control,
+ struct pt_regs *regs)
+{
+ if (!(regs->psw.mask & PSW_MASK_PER))
+ return 0;
+ /* user space single step */
+ if (control == 0)
+ return 1;
+ /* over indication for storage alteration */
+ if ((control & 0x20200000) && (cause & 0x2000))
+ return 1;
+ if (cause & 0x8000) {
+ /* all branches */
+ if ((control & 0x80800000) == 0x80000000)
+ return 1;
+ /* branch into selected range */
+ if (((control & 0x80800000) == 0x80800000) &&
+ regs->psw.addr >= current->thread.per_user.start &&
+ regs->psw.addr <= current->thread.per_user.end)
+ return 1;
+ }
+ return 0;
+}
+
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (regs->psw.addr - utask->xol_vaddr == ilen)
regs->psw.addr = utask->vaddr + ilen;
}
- /* If per tracing was active generate trap */
- if (regs->psw.mask & PSW_MASK_PER)
- do_per_trap(regs);
+ if (check_per_event(current->thread.per_event.cause,
+ current->thread.per_user.control, regs)) {
+ /* fix per address */
+ current->thread.per_event.address = utask->vaddr;
+ /* trigger per event */
+ set_pt_regs_flag(regs, PIF_PER_TRAP);
+ }
return 0;
}
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
clear_thread_flag(TIF_UPROBE_SINGLESTEP);
regs->int_code = auprobe->saved_int_code;
regs->psw.addr = current->utask->vaddr;
+ current->thread.per_event.address = current->utask->vaddr;
}
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__rc; \
})
-#define emu_store_ril(ptr, input) \
+#define emu_store_ril(regs, ptr, input) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
+ __typeof__(ptr) __ptr = (ptr); \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
- else if ((u64 __force)ptr & mask) \
+ else if ((u64 __force)__ptr & mask) \
__rc = EMU_SPECIFICATION; \
- else if (put_user(*(input), ptr)) \
+ else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
+ if (__rc == 0) \
+ sim_stor_event(regs, __ptr, mask + 1); \
__rc; \
})
@@ -198,6 +230,25 @@ union split_register {
};
/*
+ * If user per registers are setup to trace storage alterations and an
+ * emulated store took place on a fitting address a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+ if (!(regs->psw.mask & PSW_MASK_PER))
+ return;
+ if (!(current->thread.per_user.control & PER_EVENT_STORE))
+ return;
+ if ((void *)current->thread.per_user.start > (addr + len))
+ return;
+ if ((void *)current->thread.per_user.end < addr)
+ return;
+ current->thread.per_event.address = regs->psw.addr;
+ current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+ set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
+/*
* pc relative instructions are emulated, since parameters may not be
* accessible from the xol area due to range limitations.
*/
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
break;
case 0x07: /* sthrl */
- rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+ rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
break;
case 0x0b: /* stgrl */
- rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+ rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
break;
case 0x0f: /* strl */
- rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+ rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7f0089d9a4aa..e34122e539a1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
struct thread_info *ti = task_thread_info(tsk);
u64 timer, system;
- WARN_ON_ONCE(!irqs_disabled());
-
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index be99357d238c..3cf8cc03fff6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
struct page *page;
- unsigned long offset;
+ unsigned long offset, mask;
offset = (unsigned long) entry / sizeof(unsigned long);
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
- page = pmd_to_page((pmd_t *) entry);
+ mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+ page = virt_to_page((void *)((unsigned long) entry & mask));
return page->index + offset;
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index c52ac77408ca..524496d47ef5 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_DISP(0x88500000, K);
break;
case BPF_ALU | BPF_NEG: /* A = -A */
- /* lnr %r5,%r5 */
- EMIT2(0x1155);
+ /* lcr %r5,%r5 */
+ EMIT2(0x1355);
break;
case BPF_JMP | BPF_JA: /* ip += K */
offset = addrs[i + K] + jit->start - jit->prg;
@@ -502,8 +502,8 @@ branch: if (filter->jt == filter->jf) {
xbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
jit->seen |= SEEN_XREG;
- /* cr %r5,%r12 */
- EMIT2(0x195c);
+ /* clr %r5,%r12 */
+ EMIT2(0x155c);
}
goto branch;
case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
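Both substitutions above are semantic fixes, not cleanups: lnr sets a register to minus its absolute value while lcr performs two's-complement negation, and cr compares signed where clr compares unsigned, which matters because classic BPF values are unsigned 32-bit. A user-space sketch of the difference (illustrative, not JIT output):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int32_t a = -5;
	uint32_t u = 0x80000000u, x = 1;

	/* lnr-style vs lcr-style negation: only the latter is BPF_NEG */
	printf("lnr: %d  lcr: %d\n", -abs(a), -a);	/* -5 vs 5 */

	/* cr (signed) vs clr (unsigned) comparison */
	printf("cr:  %d\n", (int32_t)u > (int32_t)x);	/* 0: -2^31 < 1 */
	printf("clr: %d\n", u > x);			/* 1: 2^31 > 1 */
	return 0;
}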
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5b016e2498f3..3db07f30636f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -51,6 +51,7 @@ targets += cpustr.h
$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
$(call if_changed,cpustr)
endif
+clean-files += cpustr.h
# ---------------------------------------------------------------------------
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index fd0f848938cc..5a4a089e8b1f 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
@@ -46,6 +45,7 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
+ obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
endif
aes-i586-y := aes-i586-asm_32.o aes_glue.o
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index 2df2a0298f5a..a916c4a61165 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -208,7 +208,7 @@ ddq_add_8:
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 3*16(p_keys), xkeyA
+ vmovdqa 3*16(p_keys), xkey4
.endif
.else
vmovdqa 3*16(p_keys), xkeyA
@@ -224,7 +224,7 @@ ddq_add_8:
add $(16*by), p_in
.if (klen == KEY_128)
- vmovdqa 4*16(p_keys), xkey4
+ vmovdqa 4*16(p_keys), xkeyB
.else
.if (load_keys)
vmovdqa 4*16(p_keys), xkey4
@@ -234,7 +234,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyA, var_xdata, var_xdata /* key 3 */
+ /* key 3 */
+ .if (klen == KEY_128)
+ vaesenc xkey4, var_xdata, var_xdata
+ .else
+ vaesenc xkeyA, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -243,13 +248,18 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkey4, var_xdata, var_xdata /* key 4 */
+ /* key 4 */
+ .if (klen == KEY_128)
+ vaesenc xkeyB, var_xdata, var_xdata
+ .else
+ vaesenc xkey4, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 6*16(p_keys), xkeyB
+ vmovdqa 6*16(p_keys), xkey8
.endif
.else
vmovdqa 6*16(p_keys), xkeyB
@@ -267,12 +277,17 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyB, var_xdata, var_xdata /* key 6 */
+ /* key 6 */
+ .if (klen == KEY_128)
+ vaesenc xkey8, var_xdata, var_xdata
+ .else
+ vaesenc xkeyB, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
.if (klen == KEY_128)
- vmovdqa 8*16(p_keys), xkey8
+ vmovdqa 8*16(p_keys), xkeyB
.else
.if (load_keys)
vmovdqa 8*16(p_keys), xkey8
@@ -288,7 +303,7 @@ ddq_add_8:
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 9*16(p_keys), xkeyA
+ vmovdqa 9*16(p_keys), xkey12
.endif
.else
vmovdqa 9*16(p_keys), xkeyA
@@ -297,7 +312,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkey8, var_xdata, var_xdata /* key 8 */
+ /* key 8 */
+ .if (klen == KEY_128)
+ vaesenc xkeyB, var_xdata, var_xdata
+ .else
+ vaesenc xkey8, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -306,7 +326,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyA, var_xdata, var_xdata /* key 9 */
+ /* key 9 */
+ .if (klen == KEY_128)
+ vaesenc xkey12, var_xdata, var_xdata
+ .else
+ vaesenc xkeyA, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -412,7 +437,6 @@ ddq_add_8:
/* main body of aes ctr load */
.macro do_aes_ctrmain key_len
-
cmp $16, num_bytes
jb .Ldo_return2\key_len
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index e7e9682a33e9..f556c4843aa1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
/*
* Load per CPU data from GDT. LSL is faster than RDTSCP and
- * works on all CPUs.
+ * works on all CPUs. This is volatile so that it orders
+ * correctly wrt barrier() and to keep gcc from cleverly
+ * hoisting it out of the calling function.
*/
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
return p;
}
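The added volatile is load-bearing: without it, GCC treats an asm with output operands as a pure function of its inputs and may CSE or hoist it, which is exactly the stale-value hazard the new comment describes. A user-space sketch of the failure mode; whether the merge actually happens depends on compiler version and optimization level.

#include <stdio.h>

static unsigned long long rdtsc_no_volatile(void)
{
	unsigned int lo, hi;

	/* No volatile: with identical (empty) inputs, the compiler may
	 * emit a single rdtsc and reuse its result for every call. */
	asm("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
	unsigned long long t1 = rdtsc_no_volatile();
	unsigned long long t2 = rdtsc_no_volatile();

	/* May print 0 at -O2 if both reads were merged. */
	printf("%llu\n", t2 - t1);
	return 0;
}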
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4433a4be8171..d1626364a28a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
@@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
return (0);
}
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index e27b49d7c922..80091ae54c2b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -66,3 +66,4 @@ targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
$(call if_changed,mkcapflags)
endif
+clean-files += capflags.c
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index e2b22df964cd..36d99a337b49 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -28,7 +28,7 @@ function dump_array()
# If the /* comment */ starts with a quote string, grab that.
VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
[ -z "$VALUE" ] && VALUE="\"$NAME\""
- [ "$VALUE" == '""' ] && continue
+ [ "$VALUE" = '""' ] && continue
# Name is uppercase, VALUE is all lowercase
VALUE="$(echo "$VALUE" | tr A-Z a-z)"
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 18eb78bbdd10..863d9b02563e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -17,7 +17,7 @@
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX 2
+#define UNCORE_EXTRA_PCI_DEV_MAX 3
/* support up to 8 sockets */
#define UNCORE_SOCKET_MAX 8
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 745b158e9a65..21af6149edf2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
+ HSWEP_PCI_PCU_3,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
{
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+ /* Detect 6-8 core systems with only two SBOXes */
+ if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
+ u32 capid4;
+
+ pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
+ 0x94, &capid4);
+ if (((capid4 >> 6) & 0x3) == 0)
+ hswep_uncore_sbox.num_boxes = 2;
+ }
+
uncore_msr_uncores = hswep_msr_uncores;
}
@@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
+ { /* PCU.3 (for Capability registers) */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ HSWEP_PCI_PCU_3),
+ },
{ /* end: all zeroes */ }
};
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index e309cc5c276e..781861cc5ee8 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
#else /* CONFIG_X86_64 */
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
(1ULL << PERF_REG_X86_ES) | \
@@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
else
return PERF_SAMPLE_REGS_ABI_64;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ struct pt_regs *user_regs = task_pt_regs(current);
+
+ /*
+ * If we're in an NMI that interrupted task_pt_regs setup, then
+ * we can't sample user regs at all. This check isn't really
+ * sufficient, though, as we could be in an NMI inside an interrupt
+ * that happened during task_pt_regs setup.
+ */
+ if (regs->sp > (unsigned long)&user_regs->r11 &&
+ regs->sp <= (unsigned long)(user_regs + 1)) {
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+ regs_user->regs = NULL;
+ return;
+ }
+
+ /*
+ * RIP, flags, and the argument registers are usually saved.
+ * orig_ax is probably okay, too.
+ */
+ regs_user_copy->ip = user_regs->ip;
+ regs_user_copy->cx = user_regs->cx;
+ regs_user_copy->dx = user_regs->dx;
+ regs_user_copy->si = user_regs->si;
+ regs_user_copy->di = user_regs->di;
+ regs_user_copy->r8 = user_regs->r8;
+ regs_user_copy->r9 = user_regs->r9;
+ regs_user_copy->r10 = user_regs->r10;
+ regs_user_copy->r11 = user_regs->r11;
+ regs_user_copy->orig_ax = user_regs->orig_ax;
+ regs_user_copy->flags = user_regs->flags;
+
+ /*
+ * Don't even try to report the "rest" regs.
+ */
+ regs_user_copy->bx = -1;
+ regs_user_copy->bp = -1;
+ regs_user_copy->r12 = -1;
+ regs_user_copy->r13 = -1;
+ regs_user_copy->r14 = -1;
+ regs_user_copy->r15 = -1;
+
+ /*
+ * For this to be at all useful, we need a reasonable guess for
+ * sp and the ABI. Be careful: we're in NMI context, and we're
+ * considering current to be the current task, so we must not look
+ * at any other percpu variables that might change during a context
+ * switch.
+ */
+ if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
+ task_thread_info(current)->status & TS_COMPAT) {
+ /* Easy case: we're in a compat syscall. */
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
+ regs_user_copy->sp = user_regs->sp;
+ regs_user_copy->cs = user_regs->cs;
+ regs_user_copy->ss = user_regs->ss;
+ } else if (user_regs->orig_ax != -1) {
+ /*
+ * We're probably in a 64-bit syscall.
+ * Warning: this code is severely racy. At least it's better
+ * than just blindly copying user_regs.
+ */
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
+ regs_user_copy->sp = this_cpu_read(old_rsp);
+ regs_user_copy->cs = __USER_CS;
+ regs_user_copy->ss = __USER_DS;
+ regs_user_copy->cx = -1; /* usually contains garbage */
+ } else {
+ /* We're probably in an interrupt or exception. */
+ regs_user->abi = user_64bit_mode(user_regs) ?
+ PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
+ regs_user_copy->sp = user_regs->sp;
+ regs_user_copy->cs = user_regs->cs;
+ regs_user_copy->ss = user_regs->ss;
+ }
+
+ regs_user->regs = regs_user_copy;
+}
#endif /* CONFIG_X86_32 */
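
The guard at the top of the 64-bit perf_get_regs_user() above is a plain address-range test: if the interrupted sp already points into the pt_regs save area at the top of the kernel stack, that frame is still being written and must not be sampled. A standalone model of the comparison, with pt_regs reduced to the fields the test touches (a sketch, not kernel code):

#include <stdio.h>

struct pt_regs_model { unsigned long r11, sp; };	/* only the fields the test uses */

static int in_frame_setup(unsigned long sp, struct pt_regs_model *user_regs)
{
	return sp > (unsigned long)&user_regs->r11 &&
	       sp <= (unsigned long)(user_regs + 1);
}

int main(void)
{
	struct pt_regs_model frame;

	/* sp inside the save area: unsafe to sample */
	printf("%d\n", in_frame_setup((unsigned long)&frame.sp, &frame));
	/* sp well below the save area: safe */
	printf("%d\n", in_frame_setup((unsigned long)&frame - 64, &frame));
	return 0;
}
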
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 2480978b31cc..1313ae6b478b 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -28,7 +28,7 @@
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
- ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
+ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
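
The one-character change above fixes an off-by-one: with '<', an instruction whose last byte ended exactly at end_kaddr was rejected even though the read stays in bounds. A standalone boundary demo using the same arithmetic (end points one past the buffer, as end_kaddr does):

#include <stdio.h>

int main(void)
{
	char buf[4];
	char *next = buf + 3;			/* about to consume the final byte */
	char *end  = buf + sizeof(buf);		/* one past the buffer */

	printf("old '<' : %d\n", next + 1 <  end);	/* 0 - last byte wrongly rejected */
	printf("new '<=': %d\n", next + 1 <= end);	/* 1 - last byte accepted */
	return 0;
}
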
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a97ee0801475..08a7d313538a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
static unsigned long __init get_new_step_size(unsigned long step_size)
{
/*
- * Explain why we shift by 5 and why we don't have to worry about
- * 'step_size << 5' overflowing:
- *
- * initial mapped size is PMD_SIZE (2M).
+ * Initial mapped size is PMD_SIZE (2M).
 * We cannot set step_size to be PUD_SIZE (1G) yet.
 * In the worst case, when we cross the 1G boundary and
* PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
- * to map 1G range with PTE. Use 5 as shift for now.
+ * to map 1G range with PTE. Hence we use one less than the
+ * difference of page table level shifts.
*
- * Don't need to worry about overflow, on 32bit, when step_size
- * is 0, round_down() returns 0 for start, and that turns it
- * into 0x100000000ULL.
+ * Don't need to worry about overflow in the top-down case, on 32bit,
+ * when step_size is 0, round_down() returns 0 for start, and that
+ * turns it into 0x100000000ULL.
+ * In the bottom-up case, round_up(x, 0) also returns 0, which the
+ * code below needs to take into account.
*/
- return step_size << 5;
+ return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}
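
For concreteness: with 4 KiB pages (PAGE_SHIFT = 12) and 2 MiB PMDs (PMD_SHIFT = 21), the new expression shifts by 21 - 12 - 1 = 8, so the mapped window can grow 256-fold per round instead of the old fixed 32x. A worked check under those x86-64 assumptions:

#include <stdio.h>

int main(void)
{
	unsigned long step_size = 2UL << 20;	/* initial step: PMD_SIZE, 2 MiB */
	int shift = 21 - 12 - 1;		/* PMD_SHIFT - PAGE_SHIFT - 1 */

	printf("shift = %d\n", shift);					/* 8 */
	printf("next step = %lu MiB\n", (step_size << shift) >> 20);	/* 512 */
	return 0;
}
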
/**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
unsigned long step_size;
unsigned long addr;
unsigned long mapped_ram_size = 0;
- unsigned long new_mapped_ram_size;
/* xen has big range in reserved near end of ram, skip it at first.*/
addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
start = map_start;
} else
start = map_start;
- new_mapped_ram_size = init_range_memory_mapping(start,
+ mapped_ram_size += init_range_memory_mapping(start,
last_start);
last_start = start;
min_pfn_mapped = last_start >> PAGE_SHIFT;
- /* only increase step_size after big range get mapped */
- if (new_mapped_ram_size > mapped_ram_size)
+ if (mapped_ram_size >= step_size)
step_size = get_new_step_size(step_size);
- mapped_ram_size += new_mapped_ram_size;
}
if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
static void __init memory_map_bottom_up(unsigned long map_start,
unsigned long map_end)
{
- unsigned long next, new_mapped_ram_size, start;
+ unsigned long next, start;
unsigned long mapped_ram_size = 0;
/* step_size needs to be small so the pgt_buf from BRK can cover it */
unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
* for page table.
*/
while (start < map_end) {
- if (map_end - start > step_size) {
+ if (step_size && map_end - start > step_size) {
next = round_up(start + 1, step_size);
if (next > map_end)
next = map_end;
- } else
+ } else {
next = map_end;
+ }
- new_mapped_ram_size = init_range_memory_mapping(start, next);
+ mapped_ram_size += init_range_memory_mapping(start, next);
start = next;
- if (new_mapped_ram_size > mapped_ram_size)
+ if (mapped_ram_size >= step_size)
step_size = get_new_step_size(step_size);
- mapped_ram_size += new_mapped_ram_size;
}
}
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 009495b9ab4b..1c9f750c3859 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
struct linux_binprm;
-/* Put the vdso above the (randomized) stack with another randomized offset.
- This way there is no hole in the middle of address space.
- To save memory make sure it is still in the same PTE as the stack top.
- This doesn't give that many random bits.
-
- Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset. This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top. This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
#else
unsigned long addr, end;
unsigned offset;
- end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+ /*
+ * Round up the start address. It can start out unaligned as a result
+ * of stack start randomization.
+ */
+ start = PAGE_ALIGN(start);
+
+ /* Round the lowest possible end address up to a PMD boundary. */
+ end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
end -= len;
- /* This loses some more bits than a modulo, but is cheaper */
- offset = get_random_int() & (PTRS_PER_PTE - 1);
- addr = start + (offset << PAGE_SHIFT);
- if (addr >= end)
- addr = end;
+
+ if (end > start) {
+ offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+ addr = start + (offset << PAGE_SHIFT);
+ } else {
+ addr = start;
+ }
/*
- * page-align it here so that get_unmapped_area doesn't
- * align it wrongfully again to the next page. addr can come in 4K
- * unaligned here as a result of stack start randomization.
+ * Forcibly align the final address in case we have a hardware
+ * issue that requires alignment for performance reasons.
*/
- addr = PAGE_ALIGN(addr);
addr = align_vdso_addr(addr);
return addr;
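
The rewritten vdso_addr() above draws the page offset uniformly over every candidate slot in [start, end], where the old code drew from a fixed PTRS_PER_PTE-sized range and clamped overshoots to end, biasing the distribution. A minimal model of the new placement (assumes 4 KiB pages; rnd stands in for get_random_int()):

#include <stdint.h>
#include <stdio.h>

static unsigned long pick_vdso_addr(unsigned long start, unsigned long end,
				    uint32_t rnd)
{
	unsigned long slots = ((end - start) >> 12) + 1;	/* pages, inclusive */

	return start + ((unsigned long)(rnd % slots) << 12);
}

int main(void)
{
	/* Every page-aligned address in [start, end] is now equally likely. */
	printf("%#lx\n", pick_vdso_addr(0x7f0000000000UL, 0x7f0000100000UL, 12345));
	return 0;
}
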
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6bf3a13e3e0f..78a881b7fc41 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -40,6 +40,7 @@
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
@@ -66,6 +67,7 @@
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
.emergency_restart = xen_emergency_restart,
};
+static unsigned char xen_get_nmi_reason(void)
+{
+ unsigned char reason = 0;
+
+ /* Construct a value which looks like it came from port 0x61. */
+ if (test_bit(_XEN_NMIREASON_io_error,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_IOCHK;
+ if (test_bit(_XEN_NMIREASON_pci_serr,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_SERR;
+
+ return reason;
+}
+
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
pv_info = xen_info;
pv_init_ops = xen_init_ops;
pv_apic_ops = xen_apic_ops;
- if (!xen_pvh_domain())
+ if (!xen_pvh_domain()) {
pv_cpu_ops = xen_cpu_ops;
+ x86_platform.get_nmi_reason = xen_get_nmi_reason;
+ }
+
if (xen_feature(XENFEAT_auto_translated_physmap))
x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
else
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index edbc7a63fd73..70fb5075c901 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
{
- BUG_ON(!slab_is_available());
+ if (unlikely(!slab_is_available())) {
+ free_bootmem((unsigned long)p, PAGE_SIZE);
+ return;
+ }
+
free_page((unsigned long)p);
}
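
free_p2m_page() now mirrors alloc_p2m_page() across the boot-to-slab transition: a page handed out by the boot allocator must be returned to it, not to free_page(). The general pairing pattern, sketched with the interfaces this file uses (kernel context assumed; the early-alloc branch is inferred from code outside the hunk):

static void *p2m_page_alloc(void)
{
	if (unlikely(!slab_is_available()))
		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
	return (void *)__get_free_page(GFP_KERNEL);
}

static void p2m_page_free(void *p)
{
	if (unlikely(!slab_is_available())) {
		free_bootmem((unsigned long)p, PAGE_SIZE);
		return;
	}
	free_page((unsigned long)p);
}
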
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
p2m_missing_pte : p2m_identity_pte;
for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
pmdp = populate_extra_pmd(
- (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+ (unsigned long)(p2m + pfn) + i * PMD_SIZE);
set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
}
}
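
The one-line change above fixes a pointer-arithmetic scaling bug: adding i * PTRS_PER_PTE to an unsigned long * advances by 512 * sizeof(unsigned long) = 4096 bytes per step (one PTE page), while populate_extra_pmd() must be walked in PMD_SIZE (2 MiB) increments. A standalone demonstration of the two step sizes (the base address is arbitrary):

#include <stdio.h>

#define PTRS_PER_PTE	512
#define PMD_SIZE	(2UL << 20)

int main(void)
{
	unsigned long *p2m = (unsigned long *)0x100000000UL;
	unsigned long pfn = 0;
	int i = 1;

	/* Old form: pointer arithmetic scales by sizeof(unsigned long). */
	unsigned long old_addr = (unsigned long)(p2m + pfn + i * PTRS_PER_PTE);
	/* Fixed form: cast first, then add a byte offset of PMD_SIZE. */
	unsigned long new_addr = (unsigned long)(p2m + pfn) + i * PMD_SIZE;

	printf("old step: %lu bytes\n", old_addr - (unsigned long)p2m);	/* 4096 */
	printf("new step: %lu bytes\n", new_addr - (unsigned long)p2m);	/* 2097152 */
	return 0;
}
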
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an individual
* pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
*/
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
pte_t *ptechk;
- pte_t *pteret = ptep;
pte_t *pte_newpg[PMDS_PER_MID_PAGE];
pmd_t *pmdp;
unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
if (ptechk == pte_pg) {
set_pmd(pmdp,
__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
- if (vaddr == (addr & ~(PMD_SIZE - 1)))
- pteret = pte_offset_kernel(pmdp, addr);
pte_newpg[i] = NULL;
}
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
vaddr += PMD_SIZE;
}
- return pteret;
+ return lookup_address(addr, &level);
}
/*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
/* PMD level is missing, allocate a new one */
- ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+ ptep = alloc_p2m_pmd(addr, pte_pg);
if (!ptep)
return false;
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dfd77dec8e2b..865e56cea7a0 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
int i;
- unsigned long addr = PFN_PHYS(pfn);
+ phys_addr_t addr = PFN_PHYS(pfn);
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ if (!xen_extra_mem[i].size)
+ continue;
pfn_s = PFN_DOWN(xen_extra_mem[i].start);
pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
- unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
- unsigned long *released)
+ unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
- unsigned long len = 0;
unsigned long pfn, end;
int ret;
WARN_ON(start_pfn > end_pfn);
+ /* Release pages first. */
end = min(end_pfn, nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
+ (*released)++;
if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
break;
- len++;
} else
break;
}
- /* Need to release pages first */
- *released += len;
- *identity += set_phys_range_identity(start_pfn, end_pfn);
+ set_phys_range_identity(start_pfn, end_pfn);
}
/*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
}
/* Update kernel mapping, but not for highmem. */
- if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+ if (pfn >= PFN_UP(__pa(high_memory - 1)))
return;
if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
unsigned long ident_pfn_iter, remap_pfn_iter;
unsigned long ident_end_pfn = start_pfn + size;
unsigned long left = size;
- unsigned long ident_cnt = 0;
unsigned int i, chunk;
WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
xen_remap_mfn = mfn;
/* Set identity map */
- ident_cnt += set_phys_range_identity(ident_pfn_iter,
- ident_pfn_iter + chunk);
+ set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
left -= chunk;
}
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
static unsigned long __init xen_set_identity_and_remap_chunk(
const struct e820entry *list, size_t map_size, unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
- unsigned long *identity, unsigned long *released)
+ unsigned long *released, unsigned long *remapped)
{
unsigned long pfn;
unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Do not remap pages beyond the current allocation */
if (cur_pfn >= nr_pages) {
/* Identity map remaining pages */
- *identity += set_phys_range_identity(cur_pfn,
- cur_pfn + size);
+ set_phys_range_identity(cur_pfn, cur_pfn + size);
break;
}
if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
if (!remap_range_size) {
pr_warning("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
- cur_pfn + left, nr_pages, identity, released);
+ cur_pfn + left, nr_pages, released);
break;
}
/* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Update variables to reflect new mappings. */
i += size;
remap_pfn += size;
- *identity += size;
+ *remapped += size;
}
/*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
static void __init xen_set_identity_and_remap(
const struct e820entry *list, size_t map_size, unsigned long nr_pages,
- unsigned long *released)
+ unsigned long *released, unsigned long *remapped)
{
phys_addr_t start = 0;
- unsigned long identity = 0;
unsigned long last_pfn = nr_pages;
const struct e820entry *entry;
unsigned long num_released = 0;
+ unsigned long num_remapped = 0;
int i;
/*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
last_pfn = xen_set_identity_and_remap_chunk(
list, map_size, start_pfn,
end_pfn, nr_pages, last_pfn,
- &identity, &num_released);
+ &num_released, &num_remapped);
start = end;
}
}
*released = num_released;
+ *remapped = num_remapped;
- pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
pr_info("Released %ld page(s)\n", num_released);
}
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
+ unsigned long remapped_pages;
int i;
int op;
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
* underlying RAM.
*/
xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
- &xen_released_pages);
+ &xen_released_pages, &remapped_pages);
extra_pages += xen_released_pages;
+ extra_pages += remapped_pages;
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index f473d268d387..69087341d9ae 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
struct xen_clock_event_device {
struct clock_event_device evt;
- char *name;
+ char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
if (evt->irq >= 0) {
unbind_from_irqhandler(evt->irq, NULL);
evt->irq = -1;
- kfree(per_cpu(xen_clock_events, cpu).name);
- per_cpu(xen_clock_events, cpu).name = NULL;
}
}
void xen_setup_timer(int cpu)
{
- char *name;
- struct clock_event_device *evt;
+ struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+ struct clock_event_device *evt = &xevt->evt;
int irq;
- evt = &per_cpu(xen_clock_events, cpu).evt;
WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
if (evt->irq >= 0)
xen_teardown_timer(cpu);
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
- name = kasprintf(GFP_KERNEL, "timer%d", cpu);
- if (!name)
- name = "<timer kasprintf failed>";
+ snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
- name, NULL);
+ xevt->name, NULL);
(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu);
evt->irq = irq;
- per_cpu(xen_clock_events, cpu).name = name;
}
void xen_setup_cpu_clockevents(void)
{
- BUG_ON(preemptible());
-
clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}
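
Embedding the name buffer in the per-cpu structure above removes a GFP_KERNEL allocation that could fail during CPU bring-up, plus the matching kfree() on teardown; snprintf() guarantees termination, and "timer%d" fits in 16 bytes for any valid CPU number. A trivial standalone check of that bound:

#include <stdio.h>

struct xevt_model { char name[16]; };	/* mirrors the new embedded buffer */

int main(void)
{
	struct xevt_model xevt;

	snprintf(xevt.name, sizeof(xevt.name), "timer%d", 999999999);
	printf("%s\n", xevt.name);	/* "timer999999999": 14 chars + NUL */
	return 0;
}
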
diff --git a/block/blk-core.c b/block/blk-core.c
index 30f6153a40c2..3ad405571dcc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+void blk_set_queue_dying(struct request_queue *q)
+{
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+ if (q->mq_ops)
+ blk_mq_wake_waiters(q);
+ else {
+ struct request_list *rl;
+
+ blk_queue_for_each_rl(rl, q) {
+ if (rl->rq_pool) {
+ wake_up(&rl->wait[BLK_RW_SYNC]);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+ blk_set_queue_dying(q);
spin_lock_irq(lock);
/*
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 32e8dbb9ad1c..60c9d4a93fe4 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
}
/*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wake up all waiters potentially sleeping on tags
*/
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
wake_index = bt_index_inc(wake_index);
}
+
+ if (include_reserve) {
+ bt = &tags->breserved_tags;
+ if (waitqueue_active(&bt->bs[0].wait))
+ wake_up(&bt->bs[0].wait);
+ }
}
/*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
atomic_dec(&tags->active_queues);
- blk_mq_tag_wakeup_all(tags);
+ blk_mq_tag_wakeup_all(tags, false);
}
/*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
* static and should never need resizing.
*/
bt_update_count(&tags->bitmap_tags, tdepth);
- blk_mq_tag_wakeup_all(tags);
+ blk_mq_tag_wakeup_all(tags, false);
return 0;
}
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 6206ed17ef76..a6fa0fc9d41a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
enum {
BLK_MQ_TAG_CACHE_MIN = 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da1ab5641227..2f95747c287e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
wake_up_all(&q->mq_freeze_wq);
}
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
{
bool freeze;
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
blk_mq_run_queues(q, false);
}
}
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
blk_mq_freeze_queue_wait(q);
}
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
{
bool wake;
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}
}
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_wakeup_all(hctx->tags, true);
+
+ /*
+ * If we are called because the queue has now been marked as
+ * dying, we need to ensure that processes currently waiting on
+ * the queue are notified as well.
+ */
+ wake_up_all(&q->mq_freeze_wq);
+}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
ctx = alloc_data.ctx;
}
blk_mq_put_ctx(ctx);
- if (!rq)
+ if (!rq) {
+ blk_mq_queue_exit(q);
return ERR_PTR(-EWOULDBLOCK);
+ }
return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
+int blk_mq_request_started(struct request *rq)
+{
+ return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+ cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
void blk_mq_kick_requeue_list(struct request_queue *q)
{
kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+ unsigned long flags;
+ LIST_HEAD(rq_list);
+
+ spin_lock_irqsave(&q->requeue_lock, flags);
+ list_splice_init(&q->requeue_list, &rq_list);
+ spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+ while (!list_empty(&rq_list)) {
+ struct request *rq;
+
+ rq = list_first_entry(&rq_list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ rq->errors = -EIO;
+ blk_mq_end_request(rq, rq->errors);
+ }
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
static inline bool is_flush_request(struct request *rq,
struct blk_flush_queue *fq, unsigned int tag)
{
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
break;
}
}
-
+
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
struct blk_mq_timeout_data *data = priv;
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+ /*
+ * If a request wasn't started before the queue was
+ * marked dying, kill it here or it'll go unnoticed.
+ */
+ if (unlikely(blk_queue_dying(rq->q))) {
+ rq->errors = -EIO;
+ blk_mq_complete_request(rq);
+ }
+ return;
+ }
+ if (rq->cmd_flags & REQ_NO_TIMEOUT)
return;
if (time_after_eq(jiffies, rq->deadline)) {
@@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->queue = q;
hctx->queue_num = hctx_idx;
hctx->flags = set->flags;
- hctx->cmd_size = set->cmd_size;
blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 206230e64f79..4f4f943c22c3 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
/*
* CPU hotplug helpers
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 56c025894cdf..246dfb16c3d9 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
struct request_queue *q = req->q;
unsigned long expiry;
+ if (req->cmd_flags & REQ_NO_TIMEOUT)
+ return;
+
/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
if (!q->mq_ops && !q->rq_timed_out_fn)
return;
diff --git a/drivers/Makefile b/drivers/Makefile
index 67d2334dc41e..527a6da8d539 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/
obj-y += tty/
obj-y += char/
-# gpu/ comes after char for AGP vs DRM startup
+# iommu/ comes before gpu/ as GPUs use iommu controllers
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+
+# gpu/ comes after char for AGP vs DRM startup and after iommu
obj-y += gpu/
obj-$(CONFIG_CONNECTOR) += connector/
@@ -141,7 +144,6 @@ obj-y += clk/
obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
-obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1fdf5e07a1c7..1020b1b53a17 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
- if (pr->apic_id == -1)
+ if (pr->phys_id == -1)
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
- ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
+ ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
if (ret)
goto out;
ret = arch_register_cpu(pr->id);
if (ret) {
- acpi_unmap_lsapic(pr->id);
+ acpi_unmap_cpu(pr->id);
goto out;
}
@@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int apic_id, cpu_index, device_declaration = 0;
+ int phys_id, cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->acpi_id = value;
}
- apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
- if (apic_id < 0)
- acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
- pr->apic_id = apic_id;
+ phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
+ if (phys_id < 0)
+ acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
+ pr->phys_id = phys_id;
- cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+ cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
cpu0_initialized = 1;
- /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+ /*
+ * Handle a UP system running an SMP kernel, with no
+ * CPU entry in the MADT
+ */
if ((cpu_index == -1) && (num_online_cpus() == 1))
cpu_index = 0;
}
@@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Remove the CPU. */
arch_unregister_cpu(pr->id);
- acpi_unmap_lsapic(pr->id);
+ acpi_unmap_cpu(pr->id);
cpu_hotplug_done();
cpu_maps_update_done();
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c2daa85fc9f7..c0d44d394ca3 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
device->power.state = ACPI_STATE_UNKNOWN;
if (!acpi_device_is_present(device))
- return 0;
+ return -ENXIO;
result = acpi_device_get_power(device, &state);
if (result)
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
index a27d31d1ba24..9dcf83682e36 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/int340x_thermal.c
@@ -14,10 +14,10 @@
#include "internal.h"
-#define DO_ENUMERATION 0x01
+#define INT3401_DEVICE 0x01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT3400", DO_ENUMERATION },
- {"INT3401"},
+ {"INT3400"},
+ {"INT3401", INT3401_DEVICE},
{"INT3402"},
{"INT3403"},
{"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
- if (id->driver_data == DO_ENUMERATION)
+ acpi_create_platform_device(adev);
+#elif defined(CONFIG_INTEL_SOC_DTS_THERMAL) || defined(CONFIG_INTEL_SOC_DTS_THERMAL_MODULE)
+ /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+ if (id->driver_data == INT3401_DEVICE)
acpi_create_platform_device(adev);
#endif
return 1;
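
The #elif guard just above relies on the Kconfig convention: a tristate symbol FOO becomes the macro CONFIG_FOO when built in or CONFIG_FOO_MODULE when built as a module, never bare FOO, so an unprefixed defined() test can never fire. A small standalone illustration of the guard pattern (macro names are made up):

#include <stdio.h>

#define CONFIG_FOO_MODULE 1	/* pretend the driver is built as a module */

#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
#define FOO_PRESENT 1
#else
#define FOO_PRESENT 0
#endif

int main(void)
{
	printf("driver present: %d\n", FOO_PRESENT);	/* 1 */
	return 0;
}
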
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 342942f90a10..02e48394276c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
unsigned long madt_end, entry;
static struct acpi_table_madt *madt;
static int read_madt;
- int apic_id = -1;
+ int phys_id = -1; /* CPU hardware ID */
if (!read_madt) {
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
}
if (!madt)
- return apic_id;
+ return phys_id;
entry = (unsigned long)madt;
madt_end = entry + madt->header.length;
@@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (!map_lapic_id(header, acpi_id, &apic_id))
+ if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (!map_x2apic_id(header, type, acpi_id, &apic_id))
+ if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (!map_lsapic_id(header, type, acpi_id, &apic_id))
+ if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
}
- return apic_id;
+ return phys_id;
}
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
- int apic_id = -1;
+ int phys_id = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
@@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
- map_lapic_id(header, acpi_id, &apic_id);
+ map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
- map_lsapic_id(header, type, acpi_id, &apic_id);
+ map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
- map_x2apic_id(header, type, acpi_id, &apic_id);
+ map_x2apic_id(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
- return apic_id;
+ return phys_id;
}
-int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
- int apic_id;
+ int phys_id;
- apic_id = map_mat_entry(handle, type, acpi_id);
- if (apic_id == -1)
- apic_id = map_madt_entry(type, acpi_id);
+ phys_id = map_mat_entry(handle, type, acpi_id);
+ if (phys_id == -1)
+ phys_id = map_madt_entry(type, acpi_id);
- return apic_id;
+ return phys_id;
}
-int acpi_map_cpuid(int apic_id, u32 acpi_id)
+int acpi_map_cpuid(int phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
- if (apic_id == -1) {
+ if (phys_id == -1) {
/*
 * On a UP processor, there is no _MAT or MADT table.
- * So above apic_id is always set to -1.
+ * So above phys_id is always set to -1.
*
 * The BIOS may define multiple CPU handles even for a UP processor.
* For example,
@@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always returns 0 for the processor
+ * Ignores phys_id and always returns 0 for the processor
* handle with acpi id 0 if nr_cpu_ids is 1.
* This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
- return apic_id;
+ return phys_id;
}
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
- if (cpu_physical_id(i) == apic_id)
+ if (cpu_physical_id(i) == phys_id)
return i;
}
#else
/* In UP kernel, only processor 0 is valid */
- if (apic_id == 0)
- return apic_id;
+ if (phys_id == 0)
+ return phys_id;
#endif
return -1;
}
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
- int apic_id;
+ int phys_id;
- apic_id = acpi_get_apicid(handle, type, acpi_id);
+ phys_id = acpi_get_phys_id(handle, type, acpi_id);
- return acpi_map_cpuid(apic_id, acpi_id);
+ return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 16914cc30882..dc4d8960684a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
if (device->wakeup.flags.valid)
acpi_power_resources_list_free(&device->wakeup.resources);
- if (!device->flags.power_manageable)
+ if (!device->power.flags.power_resources)
return;
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
device->power.flags.power_resources)
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
- if (acpi_bus_init_power(device)) {
- acpi_free_power_resources_lists(device);
+ if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
- }
}
static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
device->flags.visited = false;
+ device->flags.power_manageable = 0;
return;
}
if (device->handler)
goto ok;
if (!device->flags.initialized) {
- acpi_bus_update_power(device, NULL);
+ device->flags.power_manageable =
+ device->power.states[ACPI_STATE_D0].flags.valid;
+ if (acpi_bus_init_power(device))
+ device->flags.power_manageable = 0;
+
device->flags.initialized = true;
}
device->flags.visited = false;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index c72e79d2c5ad..032db459370f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -522,6 +522,16 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
},
},
+
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
+ .callback = video_disable_native_backlight,
+ .ident = "Dell XPS15 L521X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
+ },
+ },
{}
};
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ae9f615382f6..aa2224aa7caa 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -530,7 +530,7 @@ static int null_add_dev(void)
goto out_cleanup_queues;
nullb->q = blk_mq_init_queue(&nullb->tag_set);
- if (!nullb->q) {
+ if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
goto out_cleanup_tags;
}
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b1d5d8797315..cb529e9a82dd 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
cmd->fn = handler;
cmd->ctx = ctx;
cmd->aborted = 0;
+ blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
}
/* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
if (unlikely(status)) {
if (!(status & NVME_SC_DNR || blk_noretry_request(req))
&& (jiffies - req->start_time) < req->timeout) {
+ unsigned long flags;
+
blk_mq_requeue_request(req);
- blk_mq_kick_requeue_list(req->q);
+ spin_lock_irqsave(req->q->queue_lock, flags);
+ if (!blk_queue_stopped(req->q))
+ blk_mq_kick_requeue_list(req->q);
+ spin_unlock_irqrestore(req->q->queue_lock, flags);
return;
}
req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- blk_mq_start_request(req);
-
nvme_set_info(cmd, iod, req_completion);
spin_lock_irq(&nvmeq->q_lock);
if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
if (IS_ERR(req))
return PTR_ERR(req);
+ req->cmd_flags |= REQ_NO_TIMEOUT;
cmd_info = blk_mq_rq_to_pdu(req);
nvme_set_info(cmd_info, req, async_req_completion);
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
struct nvme_command cmd;
if (!nvmeq->qid || cmd_rq->aborted) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_list_lock, flags);
if (work_busy(&dev->reset_work))
- return;
+ goto out;
list_del_init(&dev->node);
dev_warn(&dev->pci_dev->dev,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
+ out:
+ spin_unlock_irqrestore(&dev_list_lock, flags);
return;
}
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
void *ctx;
nvme_completion_fn fn;
struct nvme_cmd_info *cmd;
- static struct nvme_completion cqe = {
- .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
- };
+ struct nvme_completion cqe;
+
+ if (!blk_mq_request_started(req))
+ return;
cmd = blk_mq_rq_to_pdu(req);
if (cmd->ctx == CMD_CTX_CANCELLED)
return;
+ if (blk_queue_dying(req->q))
+ cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+ else
+ cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
req->tag, nvmeq->qid);
ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = cmd->nvmeq;
- dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
- nvmeq->qid);
- if (nvmeq->dev->initialized)
- nvme_abort_req(req);
-
/*
* The aborted req will be completed on receiving the abort req.
* We enable the timer again. If hit twice, it'll cause a device reset,
* as the device then is in a faulty state.
*/
- return BLK_EH_RESET_TIMER;
+ int ret = BLK_EH_RESET_TIMER;
+
+ dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+ nvmeq->qid);
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (!nvmeq->dev->initialized) {
+ /*
+ * Force cancelled command frees the request, which requires we
+ * return BLK_EH_NOT_HANDLED.
+ */
+ nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+ ret = BLK_EH_NOT_HANDLED;
+ } else
+ nvme_abort_req(req);
+ spin_unlock_irq(&nvmeq->q_lock);
+
+ return ret;
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
*/
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+ int vector;
spin_lock_irq(&nvmeq->q_lock);
+ if (nvmeq->cq_vector == -1) {
+ spin_unlock_irq(&nvmeq->q_lock);
+ return 1;
+ }
+ vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
nvmeq->dev->online_queues--;
+ nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
adapter_delete_sq(dev, qid);
adapter_delete_cq(dev, qid);
}
+ if (!qid && dev->admin_q)
+ blk_mq_freeze_queue_start(dev->admin_q);
nvme_clear_queue(nvmeq);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int vector)
+ int depth)
{
struct device *dmadev = &dev->pci_dev->dev;
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
- nvmeq->cq_vector = vector;
nvmeq->qid = qid;
dev->queue_count++;
dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
struct nvme_dev *dev = nvmeq->dev;
int result;
+ nvmeq->cq_vector = qid - 1;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
.timeout = nvme_timeout,
};
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+ if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+ blk_cleanup_queue(dev->admin_q);
+ blk_mq_free_tag_set(&dev->admin_tagset);
+ }
+}
+
static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
return -ENOMEM;
dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (!dev->admin_q) {
+ if (IS_ERR(dev->admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
return -ENOMEM;
}
- }
+ if (!blk_get_queue(dev->admin_q)) {
+ nvme_dev_remove_admin(dev);
+ return -ENODEV;
+ }
+ } else
+ blk_mq_unfreeze_queue(dev->admin_q);
return 0;
}
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
- if (dev->admin_q)
- blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
nvmeq = dev->queues[0];
if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+ nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
if (!nvmeq)
return -ENOMEM;
}
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
goto free_nvmeq;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto free_nvmeq;
-
+ nvmeq->cq_vector = 0;
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
- goto free_tags;
+ goto free_nvmeq;
return result;
- free_tags:
- nvme_free_admin_tags(dev);
free_nvmeq:
nvme_free_queues(dev, 0);
return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
unsigned i;
for (i = dev->queue_count; i <= dev->max_qid; i++)
- if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+ if (!nvme_alloc_queue(dev, i, dev->q_depth))
break;
for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
break;
if (!schedule_timeout(ADMIN_TIMEOUT) ||
fatal_signal_pending(current)) {
+ /*
+ * Disable the controller first since we can't trust it
+ * at this point, but leave the admin queue enabled
+ * until all queue deletion requests are flushed.
+ * FIXME: This may take a while if there are more h/w
+ * queues than admin tags.
+ */
set_current_state(TASK_RUNNING);
-
nvme_disable_ctrl(dev, readq(&dev->bar->cap));
- nvme_disable_queue(dev, 0);
-
- send_sig(SIGKILL, dq->worker->task, 1);
+ nvme_clear_queue(dev->queues[0]);
flush_kthread_worker(dq->worker);
+ nvme_disable_queue(dev, 0);
return;
}
}
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
{
struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
cmdinfo.work);
- allow_signal(SIGKILL);
if (nvme_delete_sq(nvmeq))
nvme_del_queue_end(nvmeq);
}
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
kthread_stop(tmp);
}
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ blk_mq_freeze_queue_start(ns->queue);
+
+ spin_lock(ns->queue->queue_lock);
+ queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+ spin_unlock(ns->queue->queue_lock);
+
+ blk_mq_cancel_requeue_work(ns->queue);
+ blk_mq_stop_hw_queues(ns->queue);
+ }
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+ blk_mq_unfreeze_queue(ns->queue);
+ blk_mq_start_stopped_hw_queues(ns->queue, true);
+ blk_mq_kick_requeue_list(ns->queue);
+ }
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
dev->initialized = 0;
nvme_dev_list_remove(dev);
- if (dev->bar)
+ if (dev->bar) {
+ nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
+ }
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
for (i = dev->queue_count - 1; i >= 0; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_unmap(dev);
}
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
- if (dev->admin_q && !blk_queue_dying(dev->admin_q))
- blk_cleanup_queue(dev->admin_q);
-}
-
static void nvme_dev_remove(struct nvme_dev *dev)
{
struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
list_for_each_entry(ns, &dev->namespaces, list) {
if (ns->disk->flags & GENHD_FL_UP)
del_gendisk(ns->disk);
- if (!blk_queue_dying(ns->queue))
+ if (!blk_queue_dying(ns->queue)) {
+ blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
+ }
}
}
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
nvme_free_namespaces(dev);
nvme_release_instance(dev);
blk_mq_free_tag_set(&dev->tagset);
+ blk_put_queue(dev->admin_q);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
}
nvme_init_queue(dev->queues[0], 0);
+ result = nvme_alloc_admin_tags(dev);
+ if (result)
+ goto disable;
result = nvme_setup_io_queues(dev);
if (result)
- goto disable;
+ goto free_tags;
nvme_set_irq_hints(dev);
return result;
+ free_tags:
+ nvme_dev_remove_admin(dev);
disable:
nvme_disable_queue(dev, 0);
nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
dev->reset_workfn = nvme_remove_disks;
queue_work(nvme_workq, &dev->reset_work);
spin_unlock(&dev_list_lock);
+ } else {
+ nvme_unfreeze_queues(dev);
+ nvme_set_irq_hints(dev);
}
dev->initialized = 1;
return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
- nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
+ nvme_dev_remove(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- nvme_free_admin_tags(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ef7c098708f..cdfbd21e3597 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
- if (!q) {
+ if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index fd5a5e85d7dc..982b96323f82 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -969,7 +969,8 @@ static void sender(void *send_info,
do_gettimeofday(&t);
pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
- msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+ msg->data[0], msg->data[1],
+ (long) t.tv_sec, (long) t.tv_usec);
}
}
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 978b51eae2ec..ce3c1558cb0a 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -47,13 +47,6 @@
#define DLN2_GPIO_MAX_PINS 32
-struct dln2_irq_work {
- struct work_struct work;
- struct dln2_gpio *dln2;
- int pin;
- int type;
-};
-
struct dln2_gpio {
struct platform_device *pdev;
struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
*/
DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
- struct dln2_irq_work *irq_work;
+ /* active IRQs - not synced to hardware */
+ DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
+ /* active IRQs - synced to hardware */
+ DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
+ int irq_type[DLN2_GPIO_MAX_PINS];
+ struct mutex irq_lock;
};
struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
return !!ret;
}
-static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
- unsigned int pin, int value)
+static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
+ unsigned int pin, int value)
{
struct dln2_gpio_pin_val req = {
.pin = cpu_to_le16(pin),
.value = value,
};
- dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
- sizeof(req));
+ return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
+ sizeof(req));
}
#define DLN2_GPIO_DIRECTION_IN 0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
+ struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
+ int ret;
+
+ ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
+ if (ret < 0)
+ return ret;
+
return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
}
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
&req, sizeof(req));
}
-static void dln2_irq_work(struct work_struct *w)
-{
- struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
- struct dln2_gpio *dln2 = iw->dln2;
- u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
-
- if (test_bit(iw->pin, dln2->irqs_enabled))
- dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
- else
- dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
-}
-
-static void dln2_irq_enable(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
- int pin = irqd_to_hwirq(irqd);
-
- set_bit(pin, dln2->irqs_enabled);
- schedule_work(&dln2->irq_work[pin].work);
-}
-
-static void dln2_irq_disable(struct irq_data *irqd)
+static void dln2_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
int pin = irqd_to_hwirq(irqd);
- clear_bit(pin, dln2->irqs_enabled);
- schedule_work(&dln2->irq_work[pin].work);
+ set_bit(pin, dln2->unmasked_irqs);
}
static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
int pin = irqd_to_hwirq(irqd);
- set_bit(pin, dln2->irqs_masked);
-}
-
-static void dln2_irq_unmask(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
- struct device *dev = dln2->gpio.dev;
- int pin = irqd_to_hwirq(irqd);
-
- if (test_and_clear_bit(pin, dln2->irqs_pending)) {
- int irq;
-
- irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
- if (!irq) {
- dev_err(dev, "pin %d not mapped to IRQ\n", pin);
- return;
- }
-
- generic_handle_irq(irq);
- }
+ clear_bit(pin, dln2->unmasked_irqs);
}
static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
break;
case IRQ_TYPE_LEVEL_LOW:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
break;
case IRQ_TYPE_EDGE_BOTH:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
break;
case IRQ_TYPE_EDGE_RISING:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
break;
default:
return -EINVAL;
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
return 0;
}
+static void dln2_irq_bus_lock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+
+ mutex_lock(&dln2->irq_lock);
+}
+
+static void dln2_irq_bus_unlock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+ int pin = irqd_to_hwirq(irqd);
+ int enabled, unmasked;
+ unsigned type;
+ int ret;
+
+ enabled = test_bit(pin, dln2->enabled_irqs);
+ unmasked = test_bit(pin, dln2->unmasked_irqs);
+
+ if (enabled != unmasked) {
+ if (unmasked) {
+ type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
+ set_bit(pin, dln2->enabled_irqs);
+ } else {
+ type = DLN2_GPIO_EVENT_NONE;
+ clear_bit(pin, dln2->enabled_irqs);
+ }
+
+ ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
+ if (ret)
+ dev_err(dln2->gpio.dev, "failed to set event\n");
+ }
+
+ mutex_unlock(&dln2->irq_lock);
+}
+
static struct irq_chip dln2_gpio_irqchip = {
.name = "dln2-irq",
- .irq_enable = dln2_irq_enable,
- .irq_disable = dln2_irq_disable,
.irq_mask = dln2_irq_mask,
.irq_unmask = dln2_irq_unmask,
.irq_set_type = dln2_irq_set_type,
+ .irq_bus_lock = dln2_irq_bus_lock,
+ .irq_bus_sync_unlock = dln2_irq_bus_unlock,
};
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
return;
}
- if (!test_bit(pin, dln2->irqs_enabled))
- return;
- if (test_bit(pin, dln2->irqs_masked)) {
- set_bit(pin, dln2->irqs_pending);
- return;
- }
-
- switch (dln2->irq_work[pin].type) {
+ switch (dln2->irq_type[pin]) {
case DLN2_GPIO_EVENT_CHANGE_RISING:
if (event->value)
generic_handle_irq(irq);
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
struct dln2_gpio *dln2;
struct device *dev = &pdev->dev;
int pins;
- int i, ret;
+ int ret;
pins = dln2_gpio_get_pin_count(pdev);
if (pins < 0) {
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
if (!dln2)
return -ENOMEM;
- dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
- sizeof(struct dln2_irq_work), GFP_KERNEL);
- if (!dln2->irq_work)
- return -ENOMEM;
- for (i = 0; i < pins; i++) {
- INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
- dln2->irq_work[i].pin = i;
- dln2->irq_work[i].dln2 = dln2;
- }
+ mutex_init(&dln2->irq_lock);
dln2->pdev = pdev;
@@ -529,11 +510,8 @@ out:
static int dln2_gpio_remove(struct platform_device *pdev)
{
struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
- int i;
dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
- for (i = 0; i < dln2->gpio.ngpio; i++)
- flush_work(&dln2->irq_work[i].work);
gpiochip_remove(&dln2->gpio);
return 0;
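
Taken together, the dln2 changes above convert the driver to the standard pattern for irq chips that live behind a slow bus (here USB): .irq_mask/.irq_unmask run under a raw spinlock and must not sleep, so they only record intent in a bitmap, and the hardware is reconciled once from .irq_bus_sync_unlock, where sleeping is allowed. A minimal sketch of that shape — the struct and hw_set_event_cfg() are illustrative, not part of the driver:

	struct slow_irqchip {
		struct mutex lock;	/* taken in .irq_bus_lock */
		unsigned long unmasked;	/* state requested by the core */
		unsigned long enabled;	/* state last pushed to hardware */
	};

	int hw_set_event_cfg(struct slow_irqchip *c, int pin, bool on); /* may sleep */

	static void slow_irq_bus_sync_unlock(struct slow_irqchip *c, int pin)
	{
		if (test_bit(pin, &c->unmasked) != test_bit(pin, &c->enabled)) {
			/* one (possibly sleeping) bus transaction per change */
			hw_set_event_cfg(c, pin, test_bit(pin, &c->unmasked));
			change_bit(pin, &c->enabled);
		}
		mutex_unlock(&c->lock);
	}

This is also why the workqueue, the flush_work() loop in remove() and the irqs_pending replay logic can all go: with the hardware update deferred to the sync-unlock hook there is no async context left to race with.
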
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 09daaf2aeb56..3a5a71050559 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
err = gpiochip_add(gc);
if (err) {
dev_err(&ofdev->dev, "Could not add gpiochip\n");
- irq_domain_remove(priv->domain);
+ if (priv->domain)
+ irq_domain_remove(priv->domain);
return err;
}
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 66e40398b3d3..e620807418ea 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
@@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
-obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 7d4974b83af7..fcfdf23e1913 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,6 @@
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
-#include <linux/uaccess.h>
#include <uapi/asm-generic/mman-common.h>
#include <asm/processor.h>
#include "kfd_priv.h"
@@ -127,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep)
return 0;
}
-static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
- void __user *arg)
+static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+ void *data)
{
- struct kfd_ioctl_get_version_args args;
+ struct kfd_ioctl_get_version_args *args = data;
int err = 0;
- args.major_version = KFD_IOCTL_MAJOR_VERSION;
- args.minor_version = KFD_IOCTL_MINOR_VERSION;
-
- if (copy_to_user(arg, &args, sizeof(args)))
- err = -EFAULT;
+ args->major_version = KFD_IOCTL_MAJOR_VERSION;
+ args->minor_version = KFD_IOCTL_MINOR_VERSION;
return err;
}
@@ -221,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return 0;
}
-static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
- void __user *arg)
+static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+ void *data)
{
- struct kfd_ioctl_create_queue_args args;
+ struct kfd_ioctl_create_queue_args *args = data;
struct kfd_dev *dev;
int err = 0;
unsigned int queue_id;
@@ -233,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
memset(&q_properties, 0, sizeof(struct queue_properties));
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
pr_debug("kfd: creating queue ioctl\n");
- err = set_queue_properties_from_user(&q_properties, &args);
+ err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@@ -250,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- err = PTR_ERR(pdd);
+ err = -ESRCH;
goto err_bind_process;
}
@@ -263,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
if (err != 0)
goto err_create_queue;
- args.queue_id = queue_id;
+ args->queue_id = queue_id;
/* Return gpu_id as doorbell offset for mmap usage */
- args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
-
- if (copy_to_user(arg, &args, sizeof(args))) {
- err = -EFAULT;
- goto err_copy_args_out;
- }
+ args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
mutex_unlock(&p->mutex);
- pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+ pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
pr_debug("ring buffer address == 0x%016llX\n",
- args.ring_base_address);
+ args->ring_base_address);
pr_debug("read ptr address == 0x%016llX\n",
- args.read_pointer_address);
+ args->read_pointer_address);
pr_debug("write ptr address == 0x%016llX\n",
- args.write_pointer_address);
+ args->write_pointer_address);
return 0;
-err_copy_args_out:
- pqm_destroy_queue(&p->pqm, queue_id);
err_create_queue:
err_bind_process:
mutex_unlock(&p->mutex);
@@ -297,99 +283,90 @@ err_bind_process:
}
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
- void __user *arg)
+ void *data)
{
int retval;
- struct kfd_ioctl_destroy_queue_args args;
-
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
+ struct kfd_ioctl_destroy_queue_args *args = data;
pr_debug("kfd: destroying queue id %d for PASID %d\n",
- args.queue_id,
+ args->queue_id,
p->pasid);
mutex_lock(&p->mutex);
- retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+ retval = pqm_destroy_queue(&p->pqm, args->queue_id);
mutex_unlock(&p->mutex);
return retval;
}
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
- void __user *arg)
+ void *data)
{
int retval;
- struct kfd_ioctl_update_queue_args args;
+ struct kfd_ioctl_update_queue_args *args = data;
struct queue_properties properties;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
- if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+ if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
- if ((args.ring_base_address) &&
+ if ((args->ring_base_address) &&
(!access_ok(VERIFY_WRITE,
- (const void __user *) args.ring_base_address,
+ (const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("kfd: can't access ring base address\n");
return -EFAULT;
}
- if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+ if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
pr_err("kfd: ring size must be a power of 2 or 0\n");
return -EINVAL;
}
- properties.queue_address = args.ring_base_address;
- properties.queue_size = args.ring_size;
- properties.queue_percent = args.queue_percentage;
- properties.priority = args.queue_priority;
+ properties.queue_address = args->ring_base_address;
+ properties.queue_size = args->ring_size;
+ properties.queue_percent = args->queue_percentage;
+ properties.priority = args->queue_priority;
pr_debug("kfd: updating queue id %d for PASID %d\n",
- args.queue_id, p->pasid);
+ args->queue_id, p->pasid);
mutex_lock(&p->mutex);
- retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+ retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
mutex_unlock(&p->mutex);
return retval;
}
-static long kfd_ioctl_set_memory_policy(struct file *filep,
- struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_set_memory_policy(struct file *filep,
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_set_memory_policy_args args;
+ struct kfd_ioctl_set_memory_policy_args *args = data;
struct kfd_dev *dev;
int err = 0;
struct kfd_process_device *pdd;
enum cache_policy default_policy, alternate_policy;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
- && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
- if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
- && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@@ -397,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- err = PTR_ERR(pdd);
+ err = -ESRCH;
goto out;
}
- default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
alternate_policy =
- (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
if (!dev->dqm->set_cache_memory_policy(dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,
- (void __user *)args.alternate_aperture_base,
- args.alternate_aperture_size))
+ (void __user *)args->alternate_aperture_base,
+ args->alternate_aperture_size))
err = -EINVAL;
out:
@@ -422,53 +399,44 @@ out:
return err;
}
-static long kfd_ioctl_get_clock_counters(struct file *filep,
- struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_get_clock_counters(struct file *filep,
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_get_clock_counters_args args;
+ struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
struct timespec time;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
/* Reading GPU clock counter from KGD */
- args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
/* No access to rdtsc. Using raw monotonic time */
getrawmonotonic(&time);
- args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+ args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
get_monotonic_boottime(&time);
- args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+ args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
/* Since the counter is in nanoseconds, use a 1GHz frequency */
- args.system_clock_freq = 1000000000;
-
- if (copy_to_user(arg, &args, sizeof(args)))
- return -EFAULT;
+ args->system_clock_freq = 1000000000;
return 0;
}
static int kfd_ioctl_get_process_apertures(struct file *filp,
- struct kfd_process *p, void __user *arg)
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_get_process_apertures_args args;
+ struct kfd_ioctl_get_process_apertures_args *args = data;
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- args.num_of_nodes = 0;
+ args->num_of_nodes = 0;
mutex_lock(&p->mutex);
@@ -477,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
/* Run over all pdd of the process */
pdd = kfd_get_first_process_device_data(p);
do {
- pAperture = &args.process_apertures[args.num_of_nodes];
+ pAperture =
+ &args->process_apertures[args->num_of_nodes];
pAperture->gpu_id = pdd->dev->id;
pAperture->lds_base = pdd->lds_base;
pAperture->lds_limit = pdd->lds_limit;
@@ -487,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
pAperture->scratch_limit = pdd->scratch_limit;
dev_dbg(kfd_device,
- "node id %u\n", args.num_of_nodes);
+ "node id %u\n", args->num_of_nodes);
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device,
@@ -503,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);
- args.num_of_nodes++;
+ args->num_of_nodes++;
} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
- (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+ (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}
mutex_unlock(&p->mutex);
- if (copy_to_user(arg, &args, sizeof(args)))
- return -EFAULT;
-
return 0;
}
+#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
+ kfd_ioctl_get_version, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
+ kfd_ioctl_create_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
+ kfd_ioctl_destroy_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
+ kfd_ioctl_set_memory_policy, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
+ kfd_ioctl_get_clock_counters, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
+ kfd_ioctl_get_process_apertures, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
+ kfd_ioctl_update_queue, 0),
+};
+
+#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct kfd_process *process;
- long err = -EINVAL;
+ amdkfd_ioctl_t *func;
+ const struct amdkfd_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+ int retcode = -EINVAL;
- dev_dbg(kfd_device,
- "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
- cmd, _IOC_NR(cmd), arg);
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ goto err_i1;
+
+ if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
+ u32 amdkfd_size;
+
+ ioctl = &amdkfd_ioctls[nr];
+
+ amdkfd_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (amdkfd_size > asize)
+ asize = amdkfd_size;
+
+ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
- if (IS_ERR(process))
- return PTR_ERR(process);
+ if (IS_ERR(process)) {
+ dev_dbg(kfd_device, "no process\n");
+ goto err_i1;
+ }
- switch (cmd) {
- case KFD_IOC_GET_VERSION:
- err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
- break;
- case KFD_IOC_CREATE_QUEUE:
- err = kfd_ioctl_create_queue(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_DESTROY_QUEUE:
- err = kfd_ioctl_destroy_queue(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_SET_MEMORY_POLICY:
- err = kfd_ioctl_set_memory_policy(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_GET_CLOCK_COUNTERS:
- err = kfd_ioctl_get_clock_counters(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_GET_PROCESS_APERTURES:
- err = kfd_ioctl_get_process_apertures(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_UPDATE_QUEUE:
- err = kfd_ioctl_update_queue(filep, process,
- (void __user *)arg);
- break;
-
- default:
- dev_err(kfd_device,
- "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
- cmd, arg);
- err = -EINVAL;
- break;
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ dev_dbg(kfd_device, "no function\n");
+ retcode = -EINVAL;
+ goto err_i1;
}
- if (err < 0)
- dev_err(kfd_device,
- "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
- err, cmd, _IOC_NR(cmd));
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
- return err;
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ retcode = func(filep, process, kdata);
+
+ if (cmd & IOC_OUT)
+ if (copy_to_user((void __user *)arg, kdata, usize) != 0)
+ retcode = -EFAULT;
+
+err_i1:
+ if (!ioctl)
+ dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current), cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+
+ if (retcode)
+ dev_dbg(kfd_device, "ret = %d\n", retcode);
+
+ return retcode;
}
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
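
The rewrite above replaces the per-command switch with a table indexed by _IOC_NR(cmd) and hoists copy_from_user()/copy_to_user() into kfd_ioctl() itself, driven by the IOC_IN/IOC_OUT direction bits and the size encoded in the command word — the same scheme drm_ioctl() uses, which is why the individual handlers could shed their __user plumbing. Condensed to its essentials (command-range checks, the kmalloc() fallback for large arguments, struct-size reconciliation and process-lookup error handling all omitted), the flow is:

	static long dispatch(struct file *filep, unsigned int cmd, unsigned long arg)
	{
		const struct amdkfd_ioctl_desc *desc = &amdkfd_ioctls[_IOC_NR(cmd)];
		unsigned int size = _IOC_SIZE(cmd);
		char kdata[128];
		int ret;

		if (size > sizeof(kdata))
			return -EINVAL;		/* real code falls back to kmalloc() */

		if ((cmd & IOC_IN) &&
		    copy_from_user(kdata, (void __user *)arg, size))
			return -EFAULT;

		ret = desc->func(filep, kfd_get_process(current), kdata);

		if ((cmd & IOC_OUT) &&
		    copy_to_user((void __user *)arg, kdata, size))
			ret = -EFAULT;

		return ret;
	}
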
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 924e90c072e5..9c8961d22360 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
{
int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+ /* Release the vmid mapping */
+ set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+
set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
qpd->vmid = 0;
q->properties.vmid = 0;
@@ -272,6 +275,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}
+ pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
+ q->pipe,
+ q->queue);
+
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ q->queue, q->properties.write_ptr);
+ if (retval != 0) {
+ deallocate_hqd(dqm, q);
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ return retval;
+ }
+
return 0;
}
@@ -320,6 +335,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
struct mqd_manager *mqd;
+ bool prev_active = false;
BUG_ON(!dqm || !q || !q->mqd);
@@ -330,10 +346,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
return -ENOMEM;
}
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if (q->properties.is_active == true)
+ prev_active = true;
+
+ /*
+ * Check the new active state against the previous one
+ * and adjust the queue counter accordingly.
+ */
+ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ if ((q->properties.is_active == true) && (prev_active == false))
dqm->queue_count++;
- else
+ else if ((q->properties.is_active == false) && (prev_active == true))
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index adc31474e786..4c3828cf45bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
- return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+ return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 71699ad97d74..4c25ef504f79 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -32,7 +32,7 @@ int kfd_pasid_init(void)
{
pasid_limit = max_num_of_processes;
- pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
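
A quick worked example of the sizing bug this hunk fixes, assuming a 64-bit kernel and a pasid_limit of 512:

	/* BITS_TO_LONGS() yields a count of longs, not bytes:
	 *
	 *   BITS_TO_LONGS(512)             =  8         longs required
	 *   kzalloc(8, GFP_KERNEL)         ->  8 bytes  = only 64 bits
	 *   kcalloc(8, sizeof(long), ...)  -> 64 bytes  = 512 bits, correct
	 */
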
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f9fb81e3bb09..a5edb29507e3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -463,6 +463,24 @@ struct kfd_process {
bool is_32bit_user_mode;
};
+/**
+ * Ioctl function type.
+ *
+ * \param filep pointer to file structure.
+ * \param p amdkfd process pointer.
+ * \param data pointer to arg that was copied from user.
+ */
+typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
+ void *data);
+
+struct amdkfd_ioctl_desc {
+ unsigned int cmd;
+ int flags;
+ amdkfd_ioctl_t *func;
+ unsigned int cmd_drv;
+ const char *name;
+};
+
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index b11792d7e70e..cca1708fd811 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -921,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
uint32_t i = 0;
list_for_each_entry(dev, &topology_device_list, list) {
- ret = kfd_build_sysfs_node_entry(dev, 0);
+ ret = kfd_build_sysfs_node_entry(dev, i);
if (ret < 0)
return ret;
i++;
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 47b551970a14..96a512208fad 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -183,7 +183,7 @@ struct kfd2kgd_calls {
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
- bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+ bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 70d0f0f06f1a..e9f891c432f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1756,8 +1756,6 @@ struct drm_i915_private {
*/
struct workqueue_struct *dp_wq;
- uint32_t bios_vgacntr;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 52adcb680be3..c11603b4cf1d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1048,6 +1048,7 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1067,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -EFAULT;
}
+ intel_runtime_pm_get(dev_priv);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- return ret;
+ goto put_rpm;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
@@ -1121,6 +1124,9 @@ out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
+put_rpm:
+ intel_runtime_pm_put(dev_priv);
+
return ret;
}
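
The pwrite fix follows the usual runtime-PM bracketing: take a wakeref before any path that can touch the hardware and release it on every exit path, which is why the early return above becomes a goto to a shared unwind label. In isolation (pwrite_shape is a made-up name for illustration):

	static int pwrite_shape(struct drm_device *dev,
				struct drm_i915_private *dev_priv)
	{
		int ret;

		intel_runtime_pm_get(dev_priv);		/* keep the device awake */

		ret = i915_mutex_lock_interruptible(dev);
		if (ret)
			goto put_rpm;			/* never return past the get */

		/* ... hardware access under struct_mutex ... */

		mutex_unlock(&dev->struct_mutex);
	put_rpm:
		intel_runtime_pm_put(dev_priv);		/* balances every exit */
		return ret;
	}
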
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 996c2931c499..d0d3dfbe6d2a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3725,8 +3725,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3736,6 +3734,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
@@ -3907,8 +3906,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3918,6 +3915,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fb3e3d429191..e2af1383b179 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
- /*
- * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
- * from S3 without preserving (some of?) the other bits.
- */
- I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
@@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_shared_dpll_init(dev);
- /* save the BIOS value before clobbering it */
- dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index f5a78d53e297..ac6da7102fbb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
vlv_power_sequencer_reset(dev_priv);
}
-static void check_power_well_state(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
- if (power_well->always_on || !i915.disable_power_well) {
- if (!enabled)
- goto mismatch;
-
- return;
- }
-
- if (enabled != (power_well->count > 0))
- goto mismatch;
-
- return;
-
-mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
- power_well->name, power_well->always_on, enabled,
- power_well->count, i915.disable_power_well);
-}
-
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
-
- check_power_well_state(dev_priv, power_well);
}
power_domains->domain_use_count[domain]++;
@@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
-
- check_power_well_state(dev_priv, power_well);
}
mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index ff2b434b3db4..760947e380c9 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -26,7 +26,7 @@
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
- BUG_ON(!spin_is_locked(&event->refs_lock));
+ assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (--event->refs[index * event->types_nr + type] == 0) {
@@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
- BUG_ON(!spin_is_locked(&event->refs_lock));
+ assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (++event->refs[index * event->types_nr + type] == 1) {
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
index d1bcde55e9d7..839a32577680 100644
--- a/drivers/gpu/drm/nouveau/core/core/notify.c
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
struct nvkm_event *event = notify->event;
unsigned long flags;
- BUG_ON(!spin_is_locked(&event->list_lock));
+ assert_spin_locked(&event->list_lock);
BUG_ON(size != notify->size);
spin_lock_irqsave(&event->refs_lock, flags);
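
The conversions in both files fix the same latent problem: on uniprocessor builds spinlocks compile away, so BUG_ON(!spin_is_locked(...)) fires on perfectly valid code, whereas assert_spin_locked() checks only when the configuration actually allows it:

	/* UP kernel, !CONFIG_SMP && !CONFIG_DEBUG_SPINLOCK:
	 *
	 *   spin_is_locked(l)            always 0
	 *   BUG_ON(!spin_is_locked(l))   -> BUG_ON(1): spurious panic
	 *   assert_spin_locked(l)        -> no-op: nothing to check
	 */
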
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 674da1f095b2..732922690653 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
break;
+ case 0x106:
+ device->cname = "GK208B";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
case 0x108:
device->cname = "GK208";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
index 5e58bba0dd5c..a7a890fad1e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
@@ -44,8 +44,10 @@ static void
pramin_fini(void *data)
{
struct priv *priv = data;
- nv_wr32(priv->bios, 0x001700, priv->bar0);
- kfree(priv);
+ if (priv) {
+ nv_wr32(priv->bios, 0x001700, priv->bar0);
+ kfree(priv);
+ }
}
static void *
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
index 00f2ca7e44a5..033a8e999497 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -24,34 +24,71 @@
#include "nv50.h"
+struct nvaa_ram_priv {
+ struct nouveau_ram base;
+ u64 poller_base;
+};
+
static int
nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 datasize,
struct nouveau_object **pobject)
{
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ u32 rsvd_tail = (1024 * 1024); /* vbios etc */
struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvaa_ram_priv *priv;
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ ret = nouveau_ram_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
if (ret)
return ret;
- ram->size = nv_rd32(pfb, 0x10020c);
- ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+ priv->base.type = NV_MEM_TYPE_STOLEN;
+ priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
- (rsvd_head + rsvd_tail), 1);
+ rsvd_tail += 0x1000;
+ priv->poller_base = priv->base.size - rsvd_tail;
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
+ (priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
+ 1);
if (ret)
return ret;
- ram->type = NV_MEM_TYPE_STOLEN;
- ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
- ram->get = nv50_ram_get;
- ram->put = nv50_ram_put;
+ priv->base.get = nv50_ram_get;
+ priv->base.put = nv50_ram_put;
+ return 0;
+}
+
+static int
+nvaa_ram_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvaa_ram_priv *priv = (void *)object;
+ int ret;
+ u64 dniso, hostnb, flush;
+
+ ret = nouveau_ram_init(&priv->base);
+ if (ret)
+ return ret;
+
+ dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
+ hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
+ flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+
+ /* Enable NISO poller for various clients and set their associated
+ * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+ */
+ nv_wr32(pfb, 0x100c18, dniso);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
+ nv_wr32(pfb, 0x100c1c, hostnb);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
+ nv_wr32(pfb, 0x100c24, flush);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+
return 0;
}
@@ -60,7 +97,7 @@ nvaa_ram_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvaa_ram_ctor,
.dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
+ .init = nvaa_ram_init,
.fini = _nouveau_ram_fini,
},
};
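
For reference, the VRAM carve-up the new ctor establishes (byte addresses, before the >> 12 page-shift handed to nouveau_mm_init()):

	/*
	 *   [0, 256K)                   rsvd_head: VGA memory
	 *   [256K, size - rsvd_tail)    managed heap (pfb->vram)
	 *   [size - rsvd_tail, size)    0x1000 poller page, then vbios etc.;
	 *                               poller_base = size - rsvd_tail, and
	 *                               nvaa_ram_init() programs the NISO
	 *                               poller read addresses at +0x00,
	 *                               +0x20 and +0x40 inside that page
	 */
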
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
index a75c35ccf25c..165401c4045c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -24,13 +24,6 @@
#include "nv04.h"
-static void
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
-{
- struct nv04_mc_priv *priv = (void *)pmc;
- nv_wr08(priv, 0x088050, 0xff);
-}
-
struct nouveau_oclass *
nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.fini = _nouveau_mc_fini,
},
.intr = nv04_mc_intr,
- .msi_rearm = nv4c_mc_msi_rearm,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 21ec561edc99..bba2960d3dfb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
* so use the DMA API for them.
*/
if (!nv_device_is_cpu_coherent(device) &&
- ttm->caching_state == tt_uncached)
+ ttm->caching_state == tt_uncached) {
ttm_dma_unpopulate(ttm_dma, dev->dev);
+ return;
+ }
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 42c34babc2e5..bf0f9e21d714 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -36,7 +36,14 @@ void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
+ struct device *dev = drm->dev->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0 && ret != -EACCES))
+ return;
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
/* reset filp so nouveau_bo_del_ttm() can test for it */
gem->filp = NULL;
ttm_bo_unref(&bo);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
int
@@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_vma *vma;
+ struct device *dev = drm->dev->dev;
int ret;
if (!cli->vm)
@@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0 && ret != -EACCES)
+ goto out;
+
ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
- if (ret) {
+ if (ret)
kfree(vma);
- goto out;
- }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
} else {
vma->refcount++;
}
@@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct device *dev = drm->dev->dev;
struct nouveau_vma *vma;
int ret;
@@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (vma) {
- if (--vma->refcount == 0)
- nouveau_gem_object_unmap(nvbo, vma);
+ if (--vma->refcount == 0) {
+ ret = pm_runtime_get_sync(dev);
+ if (!WARN_ON(ret < 0 && ret != -EACCES)) {
+ nouveau_gem_object_unmap(nvbo, vma);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+ }
}
ttm_bo_unreserve(&nvbo->bo);
}
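
All three hooks above grow the same runtime-PM bracket: hold a reference across work that may touch the GPU, then drop it with autosuspend so the device can power down again after the grace period. -EACCES is special-cased because it means runtime PM is disabled — the device is permanently powered, so proceeding is safe. As a standalone sketch (touch_hw_with_rpm is a made-up name):

	static void touch_hw_with_rpm(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES)
			return;				/* wakeup genuinely failed */

		/* ... touch the hardware ... */

		pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
		pm_runtime_put_autosuspend(dev);
	}
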
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d59ec491dbb9..ed644a4f6f57 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return pll;
}
/* otherwise, pick one of the plls */
- if ((rdev->family == CHIP_KAVERI) ||
- (rdev->family == CHIP_KABINI) ||
+ if ((rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
- /* KB/KV/ML has PPLL1 and PPLL2 */
+ /* KB/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
- /* CI has PPLL0, PPLL1, and PPLL2 */
+ /* CI/KV has PPLL0, PPLL1, and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL0:
/* disable the ppll */
if ((rdev->family == CHIP_ARUBA) ||
+ (rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_BONAIRE) ||
(rdev->family == CHIP_HAWAII))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 11ba9d21b89b..db42a670f995 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
+ if ((mode->clock > 340000) &&
+ (!radeon_connector_is_dp12_capable(connector)))
+ return MODE_CLOCK_HIGH;
+
if (!radeon_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index ba85986febea..03003f8a6de6 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2156,4 +2156,6 @@
#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
+#define IH_VMID_0_LUT 0x3D40u
+
#endif
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 2fe8cfc966d9..bafdf92a5732 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9b42001295ba..e3e9c10cfba9 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (radeon_bapm == -1) {
- /* There are stability issues reported on with
- * bapm enabled on an asrock system.
- */
- if (rdev->pdev->subsystem_vendor == 0x1849)
- pi->bapm_enable = false;
- else
+ /* only enable bapm on KB, ML by default */
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
pi->bapm_enable = true;
+ else
+ pi->bapm_enable = false;
} else if (radeon_bapm == 0) {
pi->bapm_enable = false;
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 242fd8b1b221..8bf87f1203cc 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -72,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@@ -92,7 +92,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
.init_memory = kgd_init_memory,
.init_pipeline = kgd_init_pipeline,
.hqd_load = kgd_hqd_load,
- .hqd_is_occupies = kgd_hqd_is_occupies,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
.get_fw_version = get_fw_version
};
@@ -101,6 +101,7 @@ static const struct kgd2kfd_calls *kgd2kfd;
bool radeon_kfd_init(void)
{
+#if defined(CONFIG_HSA_AMD_MODULE)
bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
const struct kgd2kfd_calls**);
@@ -117,6 +118,17 @@ bool radeon_kfd_init(void)
}
return true;
+#elif defined(CONFIG_HSA_AMD)
+ if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ kgd2kfd = NULL;
+
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
}
void radeon_kfd_fini(void)
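
The new preprocessor ladder covers the three ways amdkfd can be configured; the function-pointer indirection in the module case keeps radeon free of a hard symbol dependency on a module that may not be loaded:

	/*
	 *   CONFIG_HSA_AMD_MODULE : amdkfd is a module; kgd2kfd_init is
	 *                           resolved at runtime via kgd2kfd_init_p
	 *   CONFIG_HSA_AMD        : amdkfd is built in; kgd2kfd_init() is
	 *                           called directly
	 *   neither               : no KFD support; radeon_kfd_init()
	 *                           returns false
	 */
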
@@ -378,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
cpu_relax();
write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+ /* Also map the vmid to the pasid for the IH block */
+ write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
+ pasid_mapping);
+
return 0;
}
@@ -517,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id)
{
uint32_t act;
@@ -556,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
if (timeout == 0) {
pr_err("kfd: cp queue preemption time out (%dms)\n",
temp);
+ release_queue(kgd);
return -ETIME;
}
msleep(20);
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 535403e0c8a2..15aee723db77 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
u32 format;
u32 *buffer;
const u8 __user *data;
- int size, dwords, tex_width, blit_width, spitch;
+ unsigned int size, dwords, tex_width, blit_width, spitch;
u32 height;
int i;
u32 texpitch, microtile;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 230b6f887cd8..dfdc26970022 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -27,7 +27,8 @@ if HID
config HID_BATTERY_STRENGTH
bool "Battery level reporting for HID devices"
- depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+ depends on HID
+ select POWER_SUPPLY
default n
---help---
This option adds support of reporting battery strength (for HID devices
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c3d0ac1a0988..8b638792cb43 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7460f3402298..9243359c1821 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -526,6 +526,7 @@
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
#define USB_VENDOR_ID_LABTEC 0x1020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e0a0f06ac5ef..9505605b6e22 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{}
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b92bf01a1ae8..158fcf577fae 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
switch (id->product) {
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
ret = kye_tablet_enable(hdev);
if (ret) {
@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index c917ab61aafa..5bc6d80d5be7 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
switch (data[0]) {
case REPORT_ID_DJ_SHORT:
+ if (size != DJREPORT_SHORT_LENGTH) {
+ dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+ return false;
+ }
return logi_dj_dj_event(hdev, report, data, size);
case REPORT_ID_HIDPP_SHORT:
- /* intentional fallthrough */
+ if (size != HIDPP_REPORT_SHORT_LENGTH) {
+ dev_err(&hdev->dev,
+ "Short HID++ report of bad size (%d)", size);
+ return false;
+ }
+ return logi_dj_hidpp_event(hdev, report, data, size);
case REPORT_ID_HIDPP_LONG:
+ if (size != HIDPP_REPORT_LONG_LENGTH) {
+ dev_err(&hdev->dev,
+ "Long HID++ report of bad size (%d)", size);
+ return false;
+ }
return logi_dj_hidpp_event(hdev, report, data, size);
}
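
Each report ID implies a fixed length, so validating size before dispatch keeps a malicious or corrupted device from steering the handlers into out-of-bounds reads of data[]. The added checks all share one shape, made explicit here with a hypothetical helper (valid_dj_report is not part of the driver):

	static bool valid_dj_report(struct hid_device *hdev, u8 *data, int size)
	{
		int expected;

		switch (data[0]) {
		case REPORT_ID_DJ_SHORT:    expected = DJREPORT_SHORT_LENGTH;     break;
		case REPORT_ID_HIDPP_SHORT: expected = HIDPP_REPORT_SHORT_LENGTH; break;
		case REPORT_ID_HIDPP_LONG:  expected = HIDPP_REPORT_LONG_LENGTH;  break;
		default:                    return true;	/* not ours to check */
		}

		if (size != expected) {
			dev_err(&hdev->dev, "report 0x%02x of bad size (%d)",
				data[0], size);
			return false;
		}
		return true;
	}
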
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 2f420c0b6609..a93cefe0e522 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
(report->rap.sub_id == 0x41);
}
+/**
+ * hidpp_prefix_name() prefixes the given name with "Logitech ".
+ */
+static void hidpp_prefix_name(char **name, int name_length)
+{
+#define PREFIX_LENGTH 9 /* "Logitech " */
+
+ int new_length;
+ char *new_name;
+
+ if (name_length > PREFIX_LENGTH &&
+ strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0)
+ /* The prefix is already in the name */
+ return;
+
+ new_length = PREFIX_LENGTH + name_length;
+ new_name = kzalloc(new_length, GFP_KERNEL);
+ if (!new_name)
+ return;
+
+ snprintf(new_name, new_length, "Logitech %s", *name);
+
+ kfree(*name);
+
+ *name = new_name;
+}
+
/* -------------------------------------------------------------------------- */
/* HIDP++ 1.0 commands */
/* -------------------------------------------------------------------------- */
@@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
return NULL;
memcpy(name, &response.rap.params[2], len);
+
+ /* include the terminating '\0' */
+ hidpp_prefix_name(&name, len + 1);
+
return name;
}
@@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
index += ret;
}
+ /* include the terminating '\0' */
+ hidpp_prefix_name(&name, __name_length + 1);
+
return name;
}
@@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
switch (data[0]) {
case 0x02:
+ if (size < 2) {
+ hid_err(hdev, "Received HID report of bad size (%d)",
+ size);
+ return 1;
+ }
if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
input_event(wd->input, EV_KEY, BTN_LEFT,
!!(data[1] & 0x01));
input_event(wd->input, EV_KEY, BTN_RIGHT,
!!(data[1] & 0x02));
input_sync(wd->input);
+ return 0;
} else {
if (size < 21)
return 1;
return wtp_mouse_raw_xy_event(hidpp, &data[7]);
}
case REPORT_ID_HIDPP_LONG:
+ /* size is already checked in hidpp_raw_event. */
if ((report->fap.feature_index != wd->mt_feature_index) ||
(report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
return 1;
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 1a07e07d99a0..47d7e74231e5 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -35,6 +35,8 @@ static struct class *pyra_class;
static void profile_activated(struct pyra_device *pyra,
unsigned int new_profile)
{
+ if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+ return;
pyra->actual_profile = new_profile;
pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
}
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
if (off != 0 || count != PYRA_SIZE_SETTINGS)
return -EINVAL;
- mutex_lock(&pyra->pyra_lock);
-
settings = (struct pyra_settings const *)buf;
+ if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+ return -EINVAL;
+
+ mutex_lock(&pyra->pyra_lock);
retval = pyra_set_settings(usb_dev, settings);
if (retval) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d32037cbf9db..d43e967e7533 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid)
static void i2c_hid_stop(struct hid_device *hid)
{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
hid->claimed = 0;
-
- i2c_hid_free_buffers(ihid);
}
static int i2c_hid_open(struct hid_device *hid)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dc89be90b35e..b27b3d33ebab 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1232336b960e..40dfbc0444c0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb,
if (action != BUS_NOTIFY_REMOVED_DEVICE)
return 0;
- /*
- * If the device is still attached to a device driver we can't
- * tear down the domain yet as DMA mappings may still be in use.
- * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
- */
- if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
- return 0;
-
domain = find_domain(dev);
if (!domain)
return 0;
@@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
domain_remove_one_dev_info(old_domain, dev);
else
domain_remove_dev_info(old_domain);
+
+ if (!domain_type_is_vm_or_si(old_domain) &&
+ list_empty(&old_domain->devices))
+ domain_exit(old_domain);
}
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 68dfb0fd5ee9..748693192c20 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
static u64 ipmmu_page_prot(unsigned int prot, u64 type)
{
- u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+ u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
| ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
| ARM_VMSA_PTE_NS | type;
@@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type)
if (prot & IOMMU_CACHE)
pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
- if (prot & IOMMU_EXEC)
- pgprot &= ~ARM_VMSA_PTE_XN;
+ if (prot & IOMMU_NOEXEC)
+ pgprot |= ARM_VMSA_PTE_XN;
else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
/* If no access create a faulting entry to avoid TLB fills. */
pgprot &= ~ARM_VMSA_PTE_PAGE;
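
The flag flip here mirrors a core IOMMU API change (XN is the ARM execute-never PTE bit):

	/*
	 *   before: XN set by default, cleared when IOMMU_EXEC was passed
	 *   after : XN clear by default, set when IOMMU_NOEXEC is passed
	 *
	 * i.e. the API moved from opt-in execute to opt-out; mappings are
	 * now executable unless the caller forbids it.
	 */
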
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index b2023af384b9..6a8b1ec4a48a 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
.remove = rk_iommu_remove,
.driver = {
.name = "rk_iommu",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rk_iommu_dt_ids),
},
};
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index a82e542ffc21..0b380603a578 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct */
byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
+ byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
byte force_mt_info = false;
byte dir;
dword d;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 26515c27ea8c..25e419752a7b 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
led_dat->sata = 0;
led_dat->cdev.brightness = LED_OFF;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- /*
- * If available, expose the SATA activity blink capability through
- * a "sata" sysfs attribute.
- */
- if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
- led_dat->cdev.groups = netxbig_led_groups;
led_dat->mode_addr = template->mode_addr;
led_dat->mode_val = template->mode_val;
led_dat->bright_addr = template->bright_addr;
led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
led_dat->timer = pdata->timer;
led_dat->num_timer = pdata->num_timer;
+ /*
+ * If available, expose the SATA activity blink capability through
+ * a "sata" sysfs attribute.
+ */
+ if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+ led_dat->cdev.groups = netxbig_led_groups;
return led_classdev_register(&pdev->dev, &led_dat->cdev);
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b524371..d1b55fe62817 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct cxl_context *ctx = vma->vm_file->private_data;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ u64 area, offset;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+
+ pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+ __func__, ctx->pe, address, offset);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ area = ctx->afu->psn_phys;
+ if (offset > ctx->afu->adapter->ps_size)
+ return VM_FAULT_SIGBUS;
+ } else {
+ area = ctx->psn_phys;
+ if (offset > ctx->psn_size)
+ return VM_FAULT_SIGBUS;
+ }
+
+ mutex_lock(&ctx->status_mutex);
+
+ if (ctx->status != STARTED) {
+ mutex_unlock(&ctx->status_mutex);
+ pr_devel("%s: Context not started, failing problem state access\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+ mutex_unlock(&ctx->status_mutex);
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+ .fault = cxl_mmap_fault,
+};
+
/*
* Map a per-context mmio space into the given vma.
*/
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
u64 len = vma->vm_end - vma->vm_start;
len = min(len, ctx->psn_size);
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
- }
+ if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+ /* make sure there is a valid per process space for this AFU */
+ if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+ pr_devel("AFU doesn't support mmio space\n");
+ return -EINVAL;
+ }
- /* make sure there is a valid per process space for this AFU */
- if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
+ /* Can't mmap until the AFU is enabled */
+ if (!ctx->afu->enabled)
+ return -EBUSY;
}
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
-
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->psn_phys, len);
+ vma->vm_ops = &cxl_mmap_vmops;
+ return 0;
}
/*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
afu_release_irqs(ctx);
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
wake_up_all(&ctx->wq);
-
- /* Release Problem State Area mapping */
- mutex_lock(&ctx->mapping_lock);
- if (ctx->mapping)
- unmap_mapping_range(ctx->mapping, 0, 0, 1);
- mutex_unlock(&ctx->mapping_lock);
}
/*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
* created and torn down after the IDR removed
*/
__detach_context(ctx);
+
+ /*
+ * We are force detaching - remove any active PSA mappings so
+ * userspace cannot interfere with the card if it comes back.
+ * The easiest way to exercise this is to unbind and rebind the
+ * driver via sysfs while it is in use.
+ */
+ mutex_lock(&ctx->mapping_lock);
+ if (ctx->mapping)
+ unmap_mapping_range(ctx->mapping, 0, 0, 1);
+ mutex_unlock(&ctx->mapping_lock);
}
mutex_unlock(&afu->contexts_lock);
}
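The cxl change above replaces an eager vm_iomap_memory() mapping with a .fault handler so pages are inserted lazily; that is what makes the unmap_mapping_range() call in cxl_context_detach_all() effective, since revoked userspace accesses must re-fault through cxl_mmap_fault(), where the status check can refuse them. A minimal sketch of the same revocable-mmap pattern, using an invented foo_ctx device (names are illustrative, not from this patch):

static int foo_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct foo_ctx *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 offset = vmf->pgoff << PAGE_SHIFT;

	if (offset >= ctx->size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->lock);
	if (!ctx->live) {
		/* Device gone or not started: refuse the access. */
		mutex_unlock(&ctx->lock);
		return VM_FAULT_SIGBUS;
	}
	/* Insert one page; a later unmap_mapping_range() re-arms faults. */
	vm_insert_pfn(vma, address, (ctx->phys + offset) >> PAGE_SHIFT);
	mutex_unlock(&ctx->lock);

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct foo_vmops = {
	.fault = foo_mmap_fault,
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &foo_vmops;	/* no eager mapping here */
	return 0;
}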
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e9f2f10dbb37..b15d8113877c 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
- mutex_lock(&ctx->status_mutex);
- if (ctx->status != OPENED) {
- rc = -EIO;
- goto out;
- }
-
+ /* Do this outside the status_mutex to avoid a circular dependency with
+ * the locking in cxl_mmap_fault() */
if (copy_from_user(&work, uwork,
sizeof(struct cxl_ioctl_start_work))) {
rc = -EFAULT;
goto out;
}
+ mutex_lock(&ctx->status_mutex);
+ if (ctx->status != OPENED) {
+ rc = -EIO;
+ goto out;
+ }
+
/*
* if any of the reserved fields or any of the unused
* flags are set, it's invalid
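The reordering in afu_ioctl_start_work() matters because copy_from_user() can fault with mmap_sem involved in the fault path, while cxl_mmap_fault() takes status_mutex under mmap_sem; holding status_mutex across the copy therefore risks a lock inversion. A sketch of the cycle being avoided (illustrative ordering, not actual traces):

/*
 *   ioctl thread                      concurrent fault on the cxl mmap
 *   ------------                      --------------------------------
 *   mutex_lock(status_mutex)
 *   copy_from_user()                  down_read(mmap_sem)
 *     -> fault -> mmap_sem <waits>    cxl_mmap_fault()
 *                                       mutex_lock(status_mutex) <waits>
 *
 * Copying the user buffer before taking status_mutex breaks the cycle.
 */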
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index e3e56d35f0ee..970314e0aac8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "INT33BB" , "3" , &sdhci_acpi_slot_int_sd },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0D40" },
{ },
};
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "INT33BB" },
{ "INT33C6" },
{ "INT3436" },
+ { "INT344D" },
{ "PNP0D40" },
{ },
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 03427755b902..4f38554ce679 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
},
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d57c3d169914..1ec684d06d54 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -21,6 +21,9 @@
#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
/*
* PCI registers
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 45238871192d..ca3424e7ef71 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (IS_ERR(host))
return PTR_ERR(host);
- if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
- ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
- if (ret < 0)
- goto err_mbus_win;
- }
-
-
pltfm_host = sdhci_priv(host);
pltfm_host->priv = pxa;
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
+ if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+ ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+ if (ret < 0)
+ goto err_mbus_win;
+ }
+
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
@@ -396,11 +395,11 @@ err_add_host:
pm_runtime_disable(&pdev->dev);
err_of_parse:
err_cd_req:
+err_mbus_win:
clk_disable_unprepare(pxa->clk_io);
if (!IS_ERR(pxa->clk_core))
clk_disable_unprepare(pxa->clk_core);
err_clk_get:
-err_mbus_win:
sdhci_pltfm_free(pdev);
return ret;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index cbb245b58538..f1a488ee432f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host)
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
- host->mmc->max_blk_count =
- (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
}
sdhci_enable_card_detection(host);
}
@@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
spin_unlock_irq(&host->lock);
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
spin_lock_irq(&host->lock);
+
+ if (mode != MMC_POWER_OFF)
+ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ else
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
return;
}
@@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_runtime_pm_get(host);
+ present = mmc_gpio_get_cd(host->mmc);
+
spin_lock_irqsave(&host->lock, flags);
WARN_ON(host->mrq != NULL);
@@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
* zero: cd-gpio is used, and card is removed
* one: cd-gpio is used, and card is present
*/
- present = mmc_gpio_get_cd(host->mmc);
if (present < 0) {
/* If polling, assume that the card is always present. */
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
return !(present_state & SDHCI_DATA_LVL_MASK);
}
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags |= SDHCI_HS400_TUNING;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return 0;
+}
+
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
int tuning_loop_counter = MAX_TUNING_LOOP;
int err = 0;
unsigned long flags;
+ unsigned int tuning_count = 0;
+ bool hs400_tuning;
sdhci_runtime_pm_get(host);
spin_lock_irqsave(&host->lock, flags);
+ hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+ host->flags &= ~SDHCI_HS400_TUNING;
+
+ if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+ tuning_count = host->tuning_count;
+
/*
* The Host Controller needs tuning only in case of SDR104 mode
* and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
* tuning function has to be executed.
*/
switch (host->timing) {
+ /* HS400 tuning is done in HS200 mode */
case MMC_TIMING_MMC_HS400:
+ err = -EINVAL;
+ goto out_unlock;
+
case MMC_TIMING_MMC_HS200:
+ /*
+ * Periodic re-tuning for HS400 is not expected to be needed, so
+ * disable it here.
+ */
+ if (hs400_tuning)
+ tuning_count = 0;
+ break;
+
case MMC_TIMING_UHS_SDR104:
break;
@@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* FALLTHROUGH */
default:
- spin_unlock_irqrestore(&host->lock, flags);
- sdhci_runtime_pm_put(host);
- return 0;
+ goto out_unlock;
}
if (host->ops->platform_execute_tuning) {
@@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
out:
- /*
- * If this is the very first time we are here, we start the retuning
- * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
- * flag won't be set, we check this condition before actually starting
- * the timer.
- */
- if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
- (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+
+ if (tuning_count) {
host->flags |= SDHCI_USING_RETUNING_TIMER;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
- /* Tuning mode 1 limits the maximum data length to 4MB */
- mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
- } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- host->flags &= ~SDHCI_NEEDS_RETUNING;
- /* Reload the new initial value for timer */
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
+ mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
}
/*
@@ -2070,6 +2092,7 @@ out:
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
@@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
+ int present;
/* First check if client has provided their own card event */
if (host->ops->card_event)
host->ops->card_event(host);
+ present = sdhci_do_get_cd(host);
+
spin_lock_irqsave(&host->lock, flags);
/* Check host->mrq first in case we are runtime suspended */
- if (host->mrq && !sdhci_do_get_cd(host)) {
+ if (host->mrq && !present) {
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
pr_err("%s: Resetting controller.\n",
@@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = {
.hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
@@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->max_segs = SDHCI_MAX_SEGS;
/*
- * Maximum number of sectors in one transfer. Limited by DMA boundary
- * size (512KiB).
+ * Maximum number of sectors in one transfer. Limited by SDMA boundary
+ * size (512KiB). Note some tuning modes impose a 4MiB limit, but
+ * 512KiB is the smaller limit anyway.
*/
mmc->max_req_size = 524288;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 1fcd5568a352..f3470d96837a 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
}
db->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(db->clk))
+ if (IS_ERR(db->clk)) {
+ ret = PTR_ERR(db->clk);
goto out;
+ }
clk_prepare_enable(db->clk);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 3498760dc22a..760c72c6e2ac 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1170,10 +1170,6 @@ tx_request_irq_error:
init_error:
free_skbufs(dev);
alloc_skbuf_error:
- if (priv->phydev) {
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
phy_error:
return ret;
}
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
int ret;
unsigned long int flags;
- /* Stop and disconnect the PHY */
- if (priv->phydev) {
+ /* Stop the PHY */
+ if (priv->phydev)
phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1525,6 +1518,10 @@ err_free_netdev:
static int altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e398eda07298..c8af3ce3ea38 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
schedule_work(&alx->reset_wk);
}
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
struct alx_rx_queue *rxq = &alx->rxq;
struct alx_rrd *rrd;
struct alx_buffer *rxb;
struct sk_buff *skb;
u16 length, rfd_cleaned = 0;
+ int work = 0;
- while (budget > 0) {
+ while (work < budget) {
rrd = &rxq->rrd[rxq->rrd_read_idx];
if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
ALX_GET_FIELD(le32_to_cpu(rrd->word0),
RRD_NOR) != 1) {
alx_schedule_reset(alx);
- return 0;
+ return work;
}
rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
}
napi_gro_receive(&alx->napi, skb);
- budget--;
+ work++;
next_pkt:
if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
if (rfd_cleaned)
alx_refill_rx_ring(alx, GFP_ATOMIC);
- return budget > 0;
+ return work;
}
static int alx_poll(struct napi_struct *napi, int budget)
{
struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
struct alx_hw *hw = &alx->hw;
- bool complete = true;
unsigned long flags;
+ bool tx_complete;
+ int work;
- complete = alx_clean_tx_irq(alx) &&
- alx_clean_rx_irq(alx, budget);
+ tx_complete = alx_clean_tx_irq(alx);
+ work = alx_clean_rx_irq(alx, budget);
- if (!complete)
- return 1;
+ if (!tx_complete || work == budget)
+ return budget;
napi_complete(&alx->napi);
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
alx_post_write(hw);
- return 0;
+ return work;
}
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
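The alx_poll() rework above brings the driver in line with the NAPI contract: a poll handler returns the number of packets it processed, returns the full budget while work remains so the core keeps polling, and calls napi_complete() only when it finished under budget. A generic sketch of that shape (the foo_* names are placeholders, not alx functions):

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	bool tx_done = foo_clean_tx(priv);	/* TX ring fully cleaned? */
	int work = foo_clean_rx(priv, budget);	/* packets given to the stack */

	/* Work left over: stay in polling mode. */
	if (!tx_done || work == budget)
		return budget;

	/* All done under budget: leave polling and re-enable interrupts. */
	napi_complete(napi);
	foo_enable_irqs(priv);

	return work;
}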
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 553dcd8a9df2..96bf01ba32dd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
}
static void tg3_irq_quiesce(struct tg3 *tp)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
int i;
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
tp->irq_sync = 1;
smp_mb();
+ spin_unlock_bh(&tp->lock);
+
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
+
+ spin_lock_bh(&tp->lock);
}
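Dropping tp->lock around synchronize_irq() is the point of the hunk above: synchronize_irq() can sleep until any in-flight handler returns, and the tg3 interrupt handlers take tp->lock themselves, so waiting with the lock held can deadlock (and sleeping under a spinlock is invalid regardless). The __releases/__acquires annotations tell sparse that the function temporarily drops a lock it was entered with. The pattern in isolation (a sketch, not the tg3 code):

static void foo_irq_quiesce(struct foo *fp)
	__releases(fp->lock)
	__acquires(fp->lock)
{
	fp->irq_sync = 1;	/* handlers see this and bail out early */
	smp_mb();

	spin_unlock_bh(&fp->lock);	/* the handler may need this lock */
	synchronize_irq(fp->irq);	/* may sleep; must not hold a spinlock */
	spin_lock_bh(&fp->lock);
}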
/* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
u32 val;
void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
}
smp_mb();
+ tg3_full_unlock(tp);
+
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
+ tg3_full_lock(tp, 0);
+
if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
{
struct tg3 *tp = (struct tg3 *) __opaque;
- if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
- goto restart_timer;
-
spin_lock(&tp->lock);
+ if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+ spin_unlock(&tp->lock);
+ goto restart_timer;
+ }
+
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
struct tg3 *tp = container_of(work, struct tg3, reset_task);
int err;
+ rtnl_lock();
tg3_full_lock(tp, 0);
if (!netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
+ rtnl_unlock();
return;
}
@@ -11138,6 +11154,7 @@ out:
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ rtnl_unlock();
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 55eb7f2af2b4..7ef55f5fa664 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
res = PTR_ERR(lp->pclk);
goto err_free_dev;
}
- clk_enable(lp->pclk);
+ clk_prepare_enable(lp->pclk);
lp->hclk = ERR_PTR(-ENOENT);
lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
err_disable_clock:
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
err_free_dev:
free_netdev(dev);
return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
free_netdev(dev);
return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
}
return 0;
}
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
- clk_enable(lp->pclk);
+ clk_prepare_enable(lp->pclk);
netif_device_attach(net_dev);
netif_start_queue(net_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 2215d432a059..a936ee8958c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2430,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter)
*/
n10g = 0;
for_each_port(adapter, pidx)
- n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 21dc9a20308c..60426cf890a7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -323,6 +323,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
return v;
v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+ pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(v) : -1;
pi->port_type = FW_PORT_CMD_PTYPE_G(v);
pi->mod_type = FW_PORT_MOD_TYPE_NA;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 705f334ebb85..b29e027c476e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1616,7 +1616,7 @@ static int enic_open(struct net_device *netdev)
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
- goto err_out_notify_unset;
+ goto err_out_free_rq;
}
}
@@ -1649,7 +1649,9 @@ static int enic_open(struct net_device *netdev)
return 0;
-err_out_notify_unset:
+err_out_free_rq:
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
enic_dev_notify_unset(enic);
err_out_free_intr:
enic_free_intr(enic);
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index a379c3e4b57f..13d00a38a5bd 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
* break out of while loop if there are no more
* packets waiting
*/
- if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
- napi_complete(napi);
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
- dnet_writel(bp, int_enable, INTR_ENB);
- return 0;
- }
+ if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
+ break;
cmd_word = dnet_readl(bp, RX_LEN_FIFO);
pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
"size %u.\n", dev->name, pkt_len);
}
- budget -= npackets;
-
if (npackets < budget) {
/* We processed all packets available. Tell NAPI it can
- * stop polling then re-enable rx interrupts */
+ * stop polling then re-enable rx interrupts.
+ */
napi_complete(napi);
int_enable = dnet_readl(bp, INTR_ENB);
int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
dnet_writel(bp, int_enable, INTR_ENB);
- return 0;
}
- /* There are still packets waiting */
- return 1;
+ return npackets;
}
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 469691ad4a1e..40132929daf7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -424,6 +424,8 @@ struct bufdesc_ex {
* (40ns * 6).
*/
#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
struct fec_enet_priv_tx_q {
int index;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5ebdf8dc8a31..bba87775419d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
int err = -ENXIO, i;
/*
- * The dual fec interfaces are not equivalent with enet-mac.
+ * The i.MX28 dual fec interfaces are not equivalent.
* Here are the differences:
*
* - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+ if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
if (mii_cnt && fec0_mii_bus) {
fep->mii_bus = fec0_mii_bus;
@@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
mii_cnt++;
/* save fec0 mii_bus */
- if (fep->quirks & FEC_QUIRK_ENET_MAC)
+ if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
fec0_mii_bus = fep->mii_bus;
return 0;
@@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev)
pdev->id_entry = of_id->data;
fep->quirks = pdev->id_entry->driver_data;
+ fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 5b8300a32bf5..4d61ef50b465 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -281,6 +281,17 @@ config I40E_DCB
If unsure, say N.
+config I40E_FCOE
+ bool "Fibre Channel over Ethernet (FCoE)"
+ default n
+ depends on I40E && DCB && FCOE
+ ---help---
+ Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
+ in the driver. This will create a new netdev for exclusive FCoE
+ use, with XL710 FCoE offloads enabled.
+
+ If unsure, say N.
+
config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 781065eb5431..e9c3a87e5b11 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
- !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 4b94ddb29c24..c40581999121 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 433a55886ad2..cb0de455683e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
if (desc_n >= ring->count || desc_n < 0) {
dev_info(&pf->pdev->dev,
"descriptor %d not found\n", desc_n);
- return;
+ goto out;
}
if (!is_rx_ring) {
txd = I40E_TX_DESC(ring, desc_n);
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
+
+out:
kfree(ring);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 045b5c4b98b3..ad802dd0f67a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -78,7 +78,7 @@ do { \
} while (0)
typedef enum i40e_status_code i40e_status;
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef CONFIG_I40E_FCOE
#define I40E_FCOE
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 04b441460bbd..cecb340898fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
+#define WB_STRIDE 0x3
+
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ /* check to see if there are any non-cache aligned descriptors
+ * waiting to be written back, and kick the hardware to force
+ * them to be written back in case of napi polling
+ */
+ if (budget &&
+ !((i & WB_STRIDE) == WB_STRIDE) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ else
+ tx_ring->arm_wb = false;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
dev_info(tx_ring->dev,
- "tx hang detected on queue %d, resetting adapter\n",
+ "tx hang detected on queue %d, reset requested\n",
tx_ring->queue_index);
- tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+ /* do not fire the reset immediately; wait for the stack to
+ * decide we are truly stuck. This also prevents every queue
+ * from simultaneously requesting a reset.
+ */
- /* the adapter is about to reset, no point in enabling stuff */
- return true;
+ /* the adapter is about to reset, no point in enabling polling */
+ budget = 1;
}
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
}
- return budget > 0;
+ return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
+ /* allow 00 to be written to the index */;
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+ val);
}
/**
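WB_STRIDE is 0x3, so the check '(i & WB_STRIDE) == WB_STRIDE' in the hunk above is true exactly when a descriptor index closes a group of four (i % 4 == 3); when a clean pass ends mid-group, the new arm_wb path uses i40e_force_wb() to make the hardware write the pending descriptors back. The arithmetic, as a trivial standalone check:

#include <stdio.h>

#define WB_STRIDE 0x3	/* descriptors per write-back group, minus one */

int main(void)
{
	int i;

	/* Indices 3, 7, 11, ... close a four-descriptor group. */
	for (i = 0; i < 12; i++)
		printf("desc %2d: %s\n", i,
		       (i & WB_STRIDE) == WB_STRIDE ? "group end" : "pending");
	return 0;
}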
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* be set if the *inner* header is UDP
*/
- if (ipv4_tunnel &&
- (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
- !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ if (ipv4_tunnel) {
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->protocol == htons(ETH_P_8021AD))
? VLAN_HLEN : 0;
- rx_udp_csum = udp_csum(skb);
- iph = ip_hdr(skb);
- csum = csum_tcpudp_magic(
- iph->saddr, iph->daddr,
- (skb->len - skb_transport_offset(skb)),
- IPPROTO_UDP, rx_udp_csum);
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ (udp_hdr(skb)->check != 0)) {
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum)
- goto checksum_fail;
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
+
+ } /* else it's GRE and so no outer UDP header */
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
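The added 'udp_hdr(skb)->check != 0' test follows RFC 768: a UDP checksum transmitted as zero means the sender did not compute one, so there is nothing to verify; and GRE-encapsulated tunnels have no outer UDP header at all. The decision reduced to plain C (a sketch; struct pkt is invented, not a kernel structure):

#include <stdbool.h>
#include <stdint.h>

struct pkt {
	uint8_t  ip_proto;	/* 17 == IPPROTO_UDP */
	uint16_t udp_check;	/* checksum field from the UDP header */
};

/* Verify the outer UDP checksum only when one actually exists. */
static bool must_verify_outer_udp(const struct pkt *p)
{
	return p->ip_proto == 17 && p->udp_check != 0;
}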
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
bool clean_complete = true;
+ bool arm_wb = false;
int budget_per_ring;
if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- i40e_for_each_ring(ring, q_vector->tx)
+ i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+ arm_wb |= ring->arm_wb;
+ }
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ if (arm_wb)
+ i40e_force_wb(vsi, q_vector);
return budget;
+ }
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Place RS bit on last descriptor of any packet that spans across the
* 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
*/
-#define WB_STRIDE 0x3
if (((i & WB_STRIDE) != WB_STRIDE) &&
(first <= &tx_ring->tx_bi[i]) &&
(first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index e60d3accb2e2..18b00231d2f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -241,6 +241,7 @@ struct i40e_ring {
unsigned long last_rx_timestamp;
bool ring_active; /* is ring online or not */
+ bool arm_wb; /* do something to arm write back */
/* stats structs */
struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 051ea94bdcd3..0f69ef81751a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = 0;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+ s32 i = 0, timeout = 200;
while (i < timeout) {
if (igb_get_hw_semaphore(hw)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 943cbd47d832..03e9eb0dc761 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1829,7 +1829,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
- goto err_stop_fw;
+ return err;
}
choose_steering_mode(dev, &dev_cap);
@@ -1860,7 +1860,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
&init_hca);
if ((long long) icm_size < 0) {
err = icm_size;
- goto err_stop_fw;
+ return err;
}
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
@@ -1874,7 +1874,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
if (err)
- goto err_stop_fw;
+ return err;
err = mlx4_INIT_HCA(dev, &init_hca);
if (err) {
@@ -1886,7 +1886,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_query_func(dev, &dev_cap);
if (err < 0) {
mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
- goto err_stop_fw;
+ goto err_close;
} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
dev->caps.num_eqs = dev_cap.max_eqs;
dev->caps.reserved_eqs = dev_cap.reserved_eqs;
@@ -2006,11 +2006,6 @@ err_free_icm:
if (!mlx4_is_slave(dev))
mlx4_free_icms(dev);
-err_stop_fw:
- if (!mlx4_is_slave(dev)) {
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
- }
return err;
}
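The mlx4_init_hca() change above tightens the goto-based unwinding: early failures now return directly (or jump to err_close) instead of reaching err_stop_fw, so no error path releases firmware state that the failing step did not own. The general rule is that each label undoes only what was acquired before the failure, in reverse order; a generic sketch (foo_* names are invented):

static int foo_init(struct foo *f)
{
	int err;

	err = foo_alloc_a(f);
	if (err)
		return err;		/* nothing to undo yet */

	err = foo_alloc_b(f);
	if (err)
		goto err_free_a;	/* undo only step A */

	err = foo_start(f);
	if (err)
		goto err_free_b;	/* undo B, then A */

	return 0;

err_free_b:
	foo_free_b(f);
err_free_a:
	foo_free_a(f);
	return err;
}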
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d6f549685c0f..7094a9c70fd5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
mlx4_mtt_cleanup(dev, &mr->mtt);
+ mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
{
int err;
- mpt_entry->start = cpu_to_be64(iova);
- mpt_entry->length = cpu_to_be64(size);
- mpt_entry->entity_size = cpu_to_be32(page_shift);
-
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
if (err)
return err;
+ mpt_entry->start = cpu_to_be64(mr->iova);
+ mpt_entry->length = cpu_to_be64(mr->size);
+ mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
MLX4_MPT_PD_FLAG_EN_INV);
mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index af099057f0e9..71af98bb72cb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
- if (mgp->cmd == NULL)
+ if (!mgp->cmd) {
+ status = -ENOMEM;
goto abort_with_enabled;
+ }
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c2f09af5c25b..4847713211ca 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
int i = 0;
- while (i < 10) {
- if (i)
- ssleep(1);
-
+ do {
if (ql_sem_lock(qdev,
QL_DRVR_SEM_MASK,
(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
"driver lock acquired\n");
return 1;
}
- }
+ ssleep(1);
+ } while (++i < 10);
netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 9929b97cfb36..2528c3fb6b90 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2605,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
dev_err(&pdev->dev,
"%s: failed. Please Reboot\n", __func__);
+ err = -ENODEV;
goto err_out_free_hw;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c29ba80ae02b..37583a9d8853 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .fdr_value = 0x00000f0f,
.apr = 1,
.mpr = 1,
@@ -495,6 +496,7 @@ static struct sh_eth_cpu_data r8a779x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .fdr_value = 0x00000f0f,
.apr = 1,
.mpr = 1,
@@ -536,6 +538,8 @@ static struct sh_eth_cpu_data sh7724_data = {
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .trscer_err_mask = DESC_I_RINT8,
+
.apr = 1,
.mpr = 1,
.tpauser = 1,
@@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
if (!cd->eesr_err_check)
cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+ if (!cd->trscer_err_mask)
+ cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static int sh_eth_check_reset(struct net_device *ndev)
@@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
/* Frame recv control (enable multiple-packets per rx irq) */
sh_eth_write(ndev, RMCR_RNC, RMCR);
- sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+ sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 22301bf9c21d..71f5de1171bd 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
DESC_I_RINT1 = 0x0001,
};
+#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+
/* RPADIR */
enum RPADIR_BIT {
RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -470,6 +472,9 @@ struct sh_eth_cpu_data {
unsigned long tx_check;
unsigned long eesr_err_check;
+ /* Error mask */
+ unsigned long trscer_err_mask;
+
/* hardware features */
unsigned long irq_flags; /* IRQ configuration flags */
unsigned no_psr:1; /* EtherC DO NOT have PSR */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c560f9aeb55d..64d1cef4cda1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
- priv->host_port);
+ priv->host_port, -1);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ int vid;
+
+ if (priv->data.dual_emac)
+ vid = priv->slaves[priv->emac_port].port_vlan;
+ else
+ vid = priv->data.default_vlan;
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
+ vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -757,6 +764,14 @@ requeue:
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
+ int value = irq - priv->irqs_table[0];
+
+ /* NOTICE: Ending the IRQ here. The trick with the 'value' variable
+ * above is to make sure we always write the correct value to the EOI
+ * register: 0 for the RX_THRESH interrupt, 1 for RX, 2 for TX and 3
+ * for MISC.
+ */
+ cpdma_ctlr_eoi(priv->dma, value);
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
@@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
int num_tx, num_rx;
num_tx = cpdma_chan_process(priv->txch, 128);
- if (num_tx)
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
@@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
prim_cpsw->irq_enabled = true;
@@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
@@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpdma_chan_start(priv->txch);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
@@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_interrupt(ndev->irq, priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
}
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 097ebe7077ac..5246b3a18ff8 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
}
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int ret, idx;
@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
continue;
+ /* If the vid passed is -1, remove all multicast entries from
+ * the table irrespective of the vlan id. If a valid vlan id is
+ * passed, remove only the multicast entries added to that vlan
+ * id; if the vlan id doesn't match, move on to the next entry.
+ */
+ if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
+
if (cpsw_ale_get_mcast(ale_entry)) {
u8 addr[6];
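The new vid argument treats -1 as a wildcard: flush every multicast entry regardless of VLAN, otherwise flush only the entries tagged with that VLAN id. The matching rule in isolation (a standalone sketch with an invented entry layout):

#include <stdbool.h>

struct ale_entry_view {		/* invented, for illustration only */
	int  vid;
	bool mcast;
};

/* vid == -1 flushes all multicast entries; otherwise only that VLAN's. */
static bool should_flush(const struct ale_entry_view *e, int vid)
{
	if (!e->mcast)
		return false;
	return vid == -1 || e->vid == vid;
}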
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index c0d4127aa549..af1e7ecd87c6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9c2d91ea0af4..dbcbf0c5bcfa 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map temac regs.\n");
+ rc = -ENOMEM;
goto nodev;
}
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
+ rc = -ENODEV;
goto err_iounmap;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index c18a0c637c44..a6d2860b712c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+ ret = -ENOMEM;
goto nodev;
}
/* Setup checksum offload, but default to off if not specified */
@@ -1563,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
+ ret = -ENODEV;
goto err_iounmap;
}
lp->dma_regs = of_iomap(np, 0);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 24858799c204..9d4ce388510a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "no IRQ found\n");
+ rc = -ENXIO;
goto error;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 93e224217e24..f7ff493f1e73 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
static void team_notify_peers_work(struct work_struct *work)
{
struct team *team;
+ int val;
team = container_of(work, struct team, notify_peers.dw.work);
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
schedule_delayed_work(&team->notify_peers.dw, 0);
return;
}
+ val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+ if (val < 0) {
+ rtnl_unlock();
+ return;
+ }
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
rtnl_unlock();
- if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+ if (val)
schedule_delayed_work(&team->notify_peers.dw,
msecs_to_jiffies(team->notify_peers.interval));
}
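atomic_dec_if_positive() returns the old value minus one and performs the decrement only when that result is non-negative, so a negative return above means the counter was already zero and decrementing would have underflowed it; the reworked helper simply unlocks and returns in that case instead of sending a stray notification. Its semantics as a non-atomic model (the real primitive is an atomic read-modify-write):

/* Non-atomic model of atomic_dec_if_positive(), for illustration. */
static int dec_if_positive(int *v)
{
	int dec = *v - 1;

	if (dec >= 0)
		*v = dec;	/* store only when we stay non-negative */
	return dec;		/* < 0: counter was already zero, untouched */
}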
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
static void team_mcast_rejoin_work(struct work_struct *work)
{
struct team *team;
+ int val;
team = container_of(work, struct team, mcast_rejoin.dw.work);
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
return;
}
+ val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+ if (val < 0) {
+ rtnl_unlock();
+ return;
+ }
call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
rtnl_unlock();
- if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+ if (val)
schedule_delayed_work(&team->mcast_rejoin.dw,
msecs_to_jiffies(team->mcast_rejoin.interval));
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index dcb6d33141e0..1e9cdca37014 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
awd.done = 0;
urb->context = &awd;
- status = usb_submit_urb(urb, GFP_NOIO);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
// something went wrong
usb_free_urb(urb);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b8a82b86f909..602dc6668c3a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -56,6 +56,8 @@ struct qmi_wwan_state {
/* default ethernet address used by the modem */
static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
+
/* Make up an ethernet header if the packet doesn't have one.
*
* A firmware bug common among several devices causes them to send raw
@@ -332,10 +334,12 @@ next_desc:
usb_driver_release_interface(driver, info->data);
}
- /* Never use the same address on both ends of the link, even
- * if the buggy firmware told us to.
+ /* Never use the same address on both ends of the link, even if the
+ * buggy firmware told us to. And if the device is assigned the
+ * well-known buggy firmware MAC address, replace it with a random one.
*/
- if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+ if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+ ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
eth_hw_addr_random(dev->net);
/* make MAC addr easily distinguishable from an IP header */
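Besides the default modem address 02:50:f3:00:00:00 (which the firmware also uses on its own end of the link), the patch now also randomizes the well-known bad address 00:a0:c6:00:00:00 handed out by buggy firmware. The comparison, roughly (a userspace sketch; the kernel uses ether_addr_equal() and eth_hw_addr_random()):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static const uint8_t default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
static const uint8_t buggy_fw_addr[ETH_ALEN] =
	{0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};

static bool needs_random_mac(const uint8_t addr[ETH_ALEN])
{
	return !memcmp(addr, default_modem_addr, ETH_ALEN) ||
	       !memcmp(addr, buggy_fw_addr, ETH_ALEN);
}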
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2d1c77e81836..57ec23e8ccfa 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1897,6 +1897,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
netif_wake_queue(netdev);
}
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ u32 mss = skb_shinfo(skb)->gso_size;
+ int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+ int offset = skb_transport_offset(skb);
+
+ if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+ features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+ else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+ features &= ~NETIF_F_GSO_MASK;
+
+ return features;
+}
+
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -3706,6 +3722,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
.ndo_set_mac_address = rtl8152_set_mac_address,
.ndo_change_mtu = rtl8152_change_mtu,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_features_check = rtl8152_features_check,
};
static void r8152b_get_version(struct r8152 *tp)
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index e5be2d21868f..a5f9198d5747 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,8 +69,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 10
-#define IWL3160_UCODE_API_MAX 10
+#define IWL7260_UCODE_API_MAX 12
+#define IWL3160_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 10
@@ -105,7 +105,7 @@
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index bf0a95cb7153..3668fc57e770 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 10
+#define IWL8000_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 10
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index f2a047f6bb3e..1bbe4fc47b97 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+ * regardless of the band or the number of probes. FW will calculate
+ * the actual dwell time.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
@@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+ IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
};
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 1f2acf47bfb2..201846de94e7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -672,6 +672,7 @@ struct iwl_scan_channel_opt {
* @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
* @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
* and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
*/
enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
@@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
+ IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
};
enum iwl_scan_priority {
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index e5294d01181e..ec9a8e7bae1d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
* already included in the probe template, so we need to set only
* req->n_ssids - 1 bits in addition to the first bit.
*/
-static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
+ enum ieee80211_band band, int n_ssids)
{
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ return 10;
if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1);
return 10 + 2 * (n_ssids + 1);
}
-static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
+ enum ieee80211_band band)
{
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}
@@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
u32 passive_dwell =
- iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ);
+ iwl_mvm_get_passive_dwell(mvm,
+ IEEE80211_BAND_2GHZ);
params->max_out_time = passive_dwell;
} else {
params->passive_fragmented = true;
@@ -348,8 +355,8 @@ not_bound:
params->dwell[band].passive = frag_passive_dwell;
else
params->dwell[band].passive =
- iwl_mvm_get_passive_dwell(band);
- params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+ iwl_mvm_get_passive_dwell(mvm, band);
+ params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
n_ssids);
}
}
@@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ else
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4f15d9decc81..4333306ccdee 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
- /* tid_tspec will default to 0 = BE when QOS isn't enabled */
- ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ /* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
+ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+ ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ else
+ ac = tid_to_mac80211_ac[0];
+
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
TX_CMD_FLG_BT_PRIO_POS;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index e56e77ef5d2e..917431e30f74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
return false;
- if (!mvm->cfg->rx_with_siso_diversity)
+ if (mvm->cfg->rx_with_siso_diversity)
return false;
ieee80211_iterate_active_interfaces_atomic(
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2f0c4b170344..d5aadb00dd9e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -527,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
- (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
+ }
#endif
pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 846a2e6e34d8..c70efb9a6e78 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -666,7 +666,8 @@ tx_status_ok:
}
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
- u8 *entry, int rxring_idx, int desc_idx)
+ struct sk_buff *new_skb, u8 *entry,
+ int rxring_idx, int desc_idx)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
u8 tmp_one = 1;
struct sk_buff *skb;
+ if (likely(new_skb)) {
+ skb = new_skb;
+ goto remap;
+ }
skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (!skb)
return 0;
- rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
+remap:
/* just set skb->cb to mapping addr for pci_unmap_single use */
*((dma_addr_t *)skb->cb) =
pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
bufferaddress = *((dma_addr_t *)skb->cb);
if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
return 0;
+ rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) {
rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RX_PREPARE,
@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/*rx pkt */
struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
rtlpci->rx_ring[rxring_idx].idx];
+ struct sk_buff *new_skb;
if (rtlpriv->use_new_trx_flow) {
rx_remained_cnt =
@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ /* get a new skb - if allocation fails, the old one will be reused */
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ pr_err("Allocation of new skb failed in %s\n",
+ __func__);
+ goto no_new;
+ }
if (rtlpriv->use_new_trx_flow) {
buffer_desc =
&rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
schedule_work(&rtlpriv->works.lps_change_work);
}
end:
+ skb = new_skb;
+no_new:
if (rtlpriv->use_new_trx_flow) {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
+ rtlpci->rx_ring[rxring_idx].idx);
} else {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+ rxring_idx,
rtlpci->rx_ring[rxring_idx].idx);
-
if (rtlpci->rx_ring[rxring_idx].idx ==
rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
rtlpci->rx_ring[rxring_idx].idx = 0;
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index efbaf2ae1999..794204e34fba 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
}
queue->remaining_credit = credit_bytes;
+ queue->credit_usec = credit_usec;
err = connect_rings(be, queue);
if (err) {
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 22bcb4e12e2a..d8c10764f130 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -88,10 +88,8 @@ struct netfront_cb {
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
struct netfront_stats {
- u64 rx_packets;
- u64 tx_packets;
- u64 rx_bytes;
- u64 tx_bytes;
+ u64 packets;
+ u64 bytes;
struct u64_stats_sync syncp;
};
@@ -160,7 +158,8 @@ struct netfront_info {
struct netfront_queue *queues;
/* Statistics */
- struct netfront_stats __percpu *stats;
+ struct netfront_stats __percpu *rx_stats;
+ struct netfront_stats __percpu *tx_stats;
atomic_t rx_gso_checksum_fixup;
};
@@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
struct netfront_info *np = netdev_priv(dev);
- struct netfront_stats *stats = this_cpu_ptr(np->stats);
+ struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
struct xen_netif_tx_request *tx;
char *data = skb->data;
RING_IDX i;
@@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (notify)
notify_remote_via_irq(queue->tx_irq);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->bytes += skb->len;
+ tx_stats->packets++;
+ u64_stats_update_end(&tx_stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(queue);
@@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
static int handle_incoming_queue(struct netfront_queue *queue,
struct sk_buff_head *rxq)
{
- struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+ struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
int packets_dropped = 0;
struct sk_buff *skb;
@@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
continue;
}
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += skb->len;
+ u64_stats_update_end(&rx_stats->syncp);
/* Pass it up. */
napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
int cpu;
for_each_possible_cpu(cpu) {
- struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+ struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+ struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ tx_packets = tx_stats->packets;
+ tx_bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
- rx_packets = stats->rx_packets;
- tx_packets = stats->tx_packets;
- rx_bytes = stats->rx_bytes;
- tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ rx_packets = rx_stats->packets;
+ rx_bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
@@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = {
#endif
};
+static void xennet_free_netdev(struct net_device *netdev)
+{
+ struct netfront_info *np = netdev_priv(netdev);
+
+ free_percpu(np->rx_stats);
+ free_percpu(np->tx_stats);
+ free_netdev(netdev);
+}
+
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
int err;
@@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->queues = NULL;
err = -ENOMEM;
- np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
- if (np->stats == NULL)
+ np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+ if (np->rx_stats == NULL)
+ goto exit;
+ np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+ if (np->tx_stats == NULL)
goto exit;
netdev->netdev_ops = &xennet_netdev_ops;
@@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return netdev;
exit:
- free_netdev(netdev);
+ xennet_free_netdev(netdev);
return ERR_PTR(err);
}
@@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev,
return 0;
fail:
- free_netdev(netdev);
+ xennet_free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
return err;
}
@@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev)
info->queues = NULL;
}
- free_percpu(info->stats);
-
- free_netdev(info->netdev);
+ xennet_free_netdev(info->netdev);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index ba74f0aa60c7..3c22dbebc80f 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -89,6 +89,7 @@ struct rockchip_iomux {
* @reg_pull: optional separate register for additional pull settings
* @clk: clock of the gpio bank
* @irq: interrupt of the gpio bank
+ * @saved_enables: Saved content of GPIO_INTEN at suspend time.
* @pin_base: first pin number
* @nr_pins: number of pins in this bank
* @name: name of the bank
@@ -107,6 +108,7 @@ struct rockchip_pin_bank {
struct regmap *regmap_pull;
struct clk *clk;
int irq;
+ u32 saved_enables;
u32 pin_base;
u8 nr_pins;
char *name;
@@ -1543,6 +1545,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
+ irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ irq_gc_lock(gc);
+
+ val = irq_reg_readl(gc, GPIO_INTEN);
+ val &= ~d->mask;
+ irq_reg_writel(gc, val, GPIO_INTEN);
+
+ irq_gc_unlock(gc);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ irq_gc_lock(gc);
+
+ val = irq_reg_readl(gc, GPIO_INTEN);
+ val |= d->mask;
+ irq_reg_writel(gc, val, GPIO_INTEN);
+
+ irq_gc_unlock(gc);
+}
+
static int rockchip_interrupts_register(struct platform_device *pdev,
struct rockchip_pinctrl *info)
{
@@ -1581,12 +1628,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc = irq_get_domain_generic_chip(bank->domain, 0);
gc->reg_base = bank->reg_base;
gc->private = bank;
- gc->chip_types[0].regs.mask = GPIO_INTEN;
+ gc->chip_types[0].regs.mask = GPIO_INTMASK;
gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
- gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
- gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+ gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+ gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+ gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
gc->wake_enabled = IRQ_MSK(bank->nr_pins);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 7c9d51382248..9e5ec00084bb 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
unsigned long config;
- st_pinconf_get(pctldev, pin_id, &config);
+ mutex_unlock(&pctldev->mutex);
+ st_pinconf_get(pctldev, pin_id, &config);
+ mutex_lock(&pctldev->mutex);
seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
"\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
"de:%ld,rt-clk:%ld,rt-delay:%ld]",
@@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = {
static struct irq_chip st_gpio_irqchip = {
.name = "GPIO",
+ .irq_disable = st_gpio_irq_mask,
.irq_mask = st_gpio_irq_mask,
.irq_unmask = st_gpio_irq_unmask,
.irq_set_type = st_gpio_irq_set_type,
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 91e97ec01418..4d41bf75c233 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id)
*/
static inline int ap_test_config_domain(unsigned int domain)
{
- if (!ap_configuration)
- return 1;
- return ap_test_config(ap_configuration->aqm, domain);
+ if (!ap_configuration) /* QCI not supported */
+ if (domain < 16)
+ return 1; /* then domains 0...15 are configured */
+ else
+ return 0;
+ else
+ return ap_test_config(ap_configuration->aqm, domain);
}
/**
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 12ca291c1380..cce1cbc1a927 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
* Return target busy if we've received a non-zero retry_delay_timer
* in a FCP_RSP.
*/
- if (time_after(jiffies, fcport->retry_delay_timestamp))
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
fcport->retry_delay_timestamp = 0;
else
goto qc24_target_busy;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 55f6774f706f..aebde3289c50 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
}
if (!strncmp("=All", text_ptr, 4)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+ cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+ cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
return -ENOMEM;
}
/*
- * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+ * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
* explicit case..
*/
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
text_ptr = strchr(text_in, '=');
if (!text_ptr) {
pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
spin_lock(&tiqn_lock);
list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
- if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+ if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
strcmp(tiqn->tiqn, text_ptr)) {
continue;
}
@@ -3512,7 +3512,7 @@ eob:
if (end_of_buf)
break;
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
break;
}
spin_unlock(&tiqn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 09a522bae222..cbcff38ac9b7 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -135,8 +135,8 @@ enum cmd_flags_table {
ICF_CONTIG_MEMORY = 0x00000020,
ICF_ATTACHED_TO_RQUEUE = 0x00000040,
ICF_OOO_CMDSN = 0x00000080,
- IFC_SENDTARGETS_ALL = 0x00000100,
- IFC_SENDTARGETS_SINGLE = 0x00000200,
+ ICF_SENDTARGETS_ALL = 0x00000100,
+ ICF_SENDTARGETS_SINGLE = 0x00000200,
};
/* struct iscsi_cmd->i_state */
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7653cfb027a2..58f49ff69b14 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
EXPORT_SYMBOL(se_dev_set_queue_depth);
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
- int block_size = dev->dev_attrib.block_size;
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " fabric_max_sectors while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (!fabric_max_sectors) {
- pr_err("dev[%p]: Illegal ZERO value for"
- " fabric_max_sectors\n", dev);
- return -EINVAL;
- }
- if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
- " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MIN);
- return -EINVAL;
- }
- if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MAX);
- return -EINVAL;
- }
- /*
- * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
- */
- if (!block_size) {
- block_size = 512;
- pr_warn("Defaulting to 512 for zero block_size\n");
- }
- fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- block_size);
-
- dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
- pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
- dev, fabric_max_sectors);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
-
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, dev->export_count);
return -EINVAL;
}
- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+ if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
- " greater than fabric_max_sectors: %u\n", dev,
- optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+ " greater than hw_max_sectors: %u\n", dev,
+ optimal_sectors, dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
- dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
xcopy_lun = &dev->xcopy_lun;
xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
+ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
dev->creation_time = get_jiffies_64();
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c2aea099ea4a..d836de200a03 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct fd_prot fd_prot;
sense_reason_t rc;
int ret = 0;
-
+ /*
+ * We are currently limited by the number of iovecs (2048) per
+ * single vfs_[writev,readv] call.
+ */
+ if (cmd->data_length > FD_MAX_BYTES) {
+ pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+ "FD_MAX_BYTES: %u iovec count limitiation\n",
+ cmd->data_length, FD_MAX_BYTES);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
/*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
&fileio_dev_attrib_hw_block_size.attr,
&fileio_dev_attrib_block_size.attr,
&fileio_dev_attrib_hw_max_sectors.attr,
- &fileio_dev_attrib_fabric_max_sectors.attr,
&fileio_dev_attrib_optimal_sectors.attr,
&fileio_dev_attrib_hw_queue_depth.attr,
&fileio_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 3efff94fbd97..78346b850968 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
q = bdev_get_queue(bd);
dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
- dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
&iblock_dev_attrib_hw_block_size.attr,
&iblock_dev_attrib_block_size.attr,
&iblock_dev_attrib_hw_max_sectors.attr,
- &iblock_dev_attrib_fabric_max_sectors.attr,
&iblock_dev_attrib_optimal_sectors.attr,
&iblock_dev_attrib_hw_queue_depth.attr,
&iblock_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d56f2aaba9af..283cf786ef98 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
return 0;
}
+ } else if (we && registered_nexus) {
+ /*
+ * Reads are allowed for Write Exclusive locks
+ * from all registrants.
+ */
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ pr_debug("Allowing READ CDB: 0x%02x for %s"
+ " reservation\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+
+ return 0;
+ }
}
pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
" for %s reservation\n", transport_dump_cmd_direction(cmd),
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 60ebd170a561..98e83ac5661b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
&rd_mcp_dev_attrib_hw_block_size.attr,
&rd_mcp_dev_attrib_block_size.attr,
&rd_mcp_dev_attrib_hw_max_sectors.attr,
- &rd_mcp_dev_attrib_fabric_max_sectors.attr,
&rd_mcp_dev_attrib_optimal_sectors.attr,
&rd_mcp_dev_attrib_hw_queue_depth.attr,
&rd_mcp_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 11bea1952435..cd4bed7b2757 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
-
- if (sectors > dev->dev_attrib.fabric_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds fabric_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.fabric_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
- if (sectors > dev->dev_attrib.hw_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds backend hw_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.hw_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
check_lba:
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 1307600fe726..4c71657da56a 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -505,7 +505,6 @@ static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
- u32 max_sectors;
int have_tp = 0;
int opt, min;
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- max_sectors = min(dev->dev_attrib.fabric_max_sectors,
- dev->dev_attrib.hw_max_sectors);
- put_unaligned_be32(max_sectors, &buf[8]);
+ put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8bfa61c9693d..1157b559683b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
&tcmu_dev_attrib_hw_block_size.attr,
&tcmu_dev_attrib_block_size.attr,
&tcmu_dev_attrib_hw_max_sectors.attr,
- &tcmu_dev_attrib_fabric_max_sectors.attr,
&tcmu_dev_attrib_optimal_sectors.attr,
&tcmu_dev_attrib_hw_queue_depth.attr,
&tcmu_dev_attrib_queue_depth.attr,
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index c1188ac053c9..2ccbc0788353 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -608,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
data->mode = THERMAL_DEVICE_DISABLED;
+ clk_disable_unprepare(data->thermal_clk);
return 0;
}
@@ -617,6 +618,7 @@ static int imx_thermal_resume(struct device *dev)
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
+ clk_prepare_enable(data->thermal_clk);
/* Enabled thermal sensor after resume */
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index 231cabc16e16..2c2ec7666eb1 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
continue;
result = acpi_bus_get_device(trt->source, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
result = acpi_bus_get_device(trt->target, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get target ACPI device\n");
}
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
if (art->source) {
result = acpi_bus_get_device(art->source, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
}
if (art->target) {
result = acpi_bus_get_device(art->target, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
}
}
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 31bb553aac26..0fe5dbbea968 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -130,6 +130,8 @@ static int proc_thermal_add(struct device *dev,
int ret;
adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
if (ACPI_FAILURE(status))
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index e145b66df444..d717f3dab6f1 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
*
* Return: pointer to trip points table, NULL otherwise
*/
-const struct thermal_trip * const
+const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
struct __thermal_zone *data = tz->devdata;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 8803e693fe68..2580a4872f90 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -63,7 +63,7 @@ struct rcar_thermal_priv {
struct mutex lock;
struct list_head list;
int id;
- int ctemp;
+ u32 ctemp;
};
#define rcar_thermal_for_each_priv(pos, common) \
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
{
struct device *dev = rcar_priv_to_dev(priv);
int i;
- int ctemp, old, new;
+ u32 ctemp, old, new;
int ret = -EINVAL;
mutex_lock(&priv->lock);
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
int i;
int ret = -ENODEV;
int idle = IDLE_INTERVAL;
+ u32 enr_bits = 0;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common)
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
/*
* platform has IRQ support.
- * Then, drier use common register
+ * Then, driver uses common registers
*/
ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (IS_ERR(common->base))
return PTR_ERR(common->base);
- /* enable temperature comparation */
- rcar_thermal_common_write(common, ENR, 0x00030303);
-
idle = 0; /* polling delay is not needed */
}
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
rcar_thermal_irq_enable(priv);
list_move_tail(&priv->list, &common->head);
+
+ /* update ENR bits */
+ enr_bits |= 3 << (i * 8);
}
+ /* enable temperature comparison */
+ if (irq)
+ rcar_thermal_common_write(common, ENR, enr_bits);
+
platform_set_drvdata(pdev, common);
dev_info(dev, "%d sensor probed\n", i);
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 9083e7520623..0531c752fbbb 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void);
void of_thermal_destroy_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
-const struct thermal_trip * const
+const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
{
return 0;
}
-static inline const struct thermal_trip * const
+static inline const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
return NULL;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 255201f22126..7cc0122a18ce 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- u8 type;
struct vfio_pci_device *vdev;
struct iommu_group *group;
int ret;
- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
- if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
group = iommu_group_get(&pdev->dev);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 14419a8ccbb6..d415d69dc237 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
++headcount;
seg += in;
}
- heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 01c01cb3933f..d695b1673ae5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
return 0;
}
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+ switch (attr) {
+ case VIRTIO_SCSI_S_SIMPLE:
+ return TCM_SIMPLE_TAG;
+ case VIRTIO_SCSI_S_ORDERED:
+ return TCM_ORDERED_TAG;
+ case VIRTIO_SCSI_S_HEAD:
+ return TCM_HEAD_TAG;
+ case VIRTIO_SCSI_S_ACA:
+ return TCM_ACA_TAG;
+ default:
+ break;
+ }
+ return TCM_SIMPLE_TAG;
+}
+
static void tcm_vhost_submission_work(struct work_struct *work)
{
struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
cmd->tvc_lun, cmd->tvc_exp_data_len,
- cmd->tvc_task_attr, cmd->tvc_data_direction,
- TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
- NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
+ vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
+ sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+ cmd->tvc_prot_sgl_count);
if (rc < 0) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 1c29bd19e3d5..0e5fde1d3ffb 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
err = broadsheet_spiflash_read_range(par, start_sector_addr,
data_start_addr, sector_buffer);
if (err)
- return err;
+ goto out;
}
/* now we copy our data into the right place in the sector buffer */
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
err = broadsheet_spiflash_read_range(par, tail_start_addr,
tail_len, sector_buffer + tail_start_addr);
if (err)
- return err;
+ goto out;
}
/* if we got here we have the full sector that we want to rewrite. */
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
/* first erase the sector */
err = broadsheet_spiflash_erase_sector(par, start_sector_addr);
if (err)
- return err;
+ goto out;
/* now write it */
err = broadsheet_spiflash_write_sector(par, start_sector_addr,
sector_buffer, sector_size);
+out:
+ kfree(sector_buffer);
return err;
}
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 92cac803dee3..1085c0432158 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -402,7 +402,7 @@ static int __init simplefb_init(void)
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_OF) && of_chosen) {
+ if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
for_each_child_of_node(of_chosen, np) {
if (of_device_is_compatible(np, "simple-framebuffer"))
of_platform_device_create(np, NULL, NULL);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 2ef9529809d8..9756f21b809e 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_free_vectors(vdev);
kfree(vp_dev->vqs);
+ vp_dev->vqs = NULL;
}
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
@@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
return 0;
}
-void virtio_pci_release_dev(struct device *_d)
-{
- /*
- * No need for a release method as we allocate/free
- * all devices together with the pci devices.
- * Provide an empty one to avoid getting a warning from core.
- */
-}
-
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index adddb647b21d..5a497289b7e9 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev);
* - ignore the affinity request if we're using INTX
*/
int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
-void virtio_pci_release_dev(struct device *);
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6c76f0f5658c..a5486e65e04b 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
};
+static void virtio_pci_release_dev(struct device *_d)
+{
+ struct virtio_device *vdev = dev_to_virtio(_d);
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ /* As struct device is a kobject, it's not safe to
+ * free the memory (including the reference counter itself)
+ * until its release callback runs. */
+ kfree(vp_dev);
+}
+
/* the PCI probing function */
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
@@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
- kfree(vp_dev);
}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 2d3e32ebfd15..8729cf68d2fe 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
{
int ret;
int type;
- struct btrfs_tree_block_info *info;
struct btrfs_extent_inline_ref *eiref;
if (*ptr == (unsigned long)-1)
@@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
}
/* we can treat both ref types equally here */
- info = (struct btrfs_tree_block_info *)(ei + 1);
*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
- *out_level = btrfs_tree_block_level(eb, info);
+
+ if (key->type == BTRFS_EXTENT_ITEM_KEY) {
+ struct btrfs_tree_block_info *info;
+
+ info = (struct btrfs_tree_block_info *)(ei + 1);
+ *out_level = btrfs_tree_block_level(eb, info);
+ } else {
+ ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
+ *out_level = (u8)key->offset;
+ }
if (ret == 1)
*ptr = (unsigned long)-1;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 054577bddaf2..de4e70fb3cbb 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
struct btrfs_delayed_node *delayed_node;
+ /*
+ * We don't do delayed inode updates during log recovery because it
+ * leads to ENOSPC problems. This means we also can't do
+ * delayed inode refs.
+ */
+ if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+ return -EAGAIN;
+
delayed_node = btrfs_get_or_create_delayed_node(inode);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a80b97100d90..15116585e714 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
- if (ret < 0)
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
goto fail;
- BUG_ON(ret); /* Corruption */
+ }
leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
fail:
- if (ret) {
+ if (ret)
btrfs_abort_transaction(trans, root, ret);
- return ret;
- }
- return 0;
+ return ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e687bb0dc73a..8bf326affb94 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
out_fail:
btrfs_end_transaction(trans, root);
- if (drop_on_err)
+ if (drop_on_err) {
+ inode_dec_link_count(inode);
iput(inode);
+ }
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f2bb13a23f86..9e1569ffbf6e 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
flags, gen, mirror_num,
have_csum ? csum : NULL);
-skip:
if (ret)
return ret;
+skip:
len -= l;
logical += l;
physical += l;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index f5013d92a7e6..c81c0e004588 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
}
}
- dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+ dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
inode, ceph_vinop(inode), len, locked_page);
if (len > 0) {
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index c35c5c614e38..06ea5cd05cd9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -239,23 +239,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
return err;
}
-/**
- * Must be called with lock_flocks() already held. Fills in the passed
- * counter variables, so you can prepare pagelist metadata before calling
- * ceph_encode_locks.
+/*
+ * Fills in the passed counter variables, so you can prepare pagelist metadata
+ * before calling ceph_encode_locks.
*/
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
- struct file_lock *lock;
+ struct file_lock_context *ctx;
*fcntl_count = 0;
*flock_count = 0;
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_POSIX)
- ++(*fcntl_count);
- else if (lock->fl_flags & FL_FLOCK)
- ++(*flock_count);
+ ctx = inode->i_flctx;
+ if (ctx) {
+ *fcntl_count = ctx->flc_posix_cnt;
+ *flock_count = ctx->flc_flock_cnt;
}
dout("counted %d flock locks and %d fcntl locks",
*flock_count, *fcntl_count);
@@ -271,6 +269,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
int num_fcntl_locks, int num_flock_locks)
{
struct file_lock *lock;
+ struct file_lock_context *ctx = inode->i_flctx;
int err = 0;
int seen_fcntl = 0;
int seen_flock = 0;
@@ -279,33 +278,34 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
dout("encoding %d flock and %d fcntl locks", num_flock_locks,
num_fcntl_locks);
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_POSIX) {
- ++seen_fcntl;
- if (seen_fcntl > num_fcntl_locks) {
- err = -ENOSPC;
- goto fail;
- }
- err = lock_to_ceph_filelock(lock, &flocks[l]);
- if (err)
- goto fail;
- ++l;
+ if (!ctx)
+ return 0;
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+ ++seen_fcntl;
+ if (seen_fcntl > num_fcntl_locks) {
+ err = -ENOSPC;
+ goto fail;
}
+ err = lock_to_ceph_filelock(lock, &flocks[l]);
+ if (err)
+ goto fail;
+ ++l;
}
- for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
- if (lock->fl_flags & FL_FLOCK) {
- ++seen_flock;
- if (seen_flock > num_flock_locks) {
- err = -ENOSPC;
- goto fail;
- }
- err = lock_to_ceph_filelock(lock, &flocks[l]);
- if (err)
- goto fail;
- ++l;
+ list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+ ++seen_flock;
+ if (seen_flock > num_flock_locks) {
+ err = -ENOSPC;
+ goto fail;
}
+ err = lock_to_ceph_filelock(lock, &flocks[l]);
+ if (err)
+ goto fail;
+ ++l;
}
fail:
+ spin_unlock(&ctx->flc_lock);
return err;
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d2171f4a6980..5f62fb7a5d0a 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2700,20 +2700,16 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_filelock *flocks;
encode_again:
- spin_lock(&inode->i_lock);
ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
- spin_unlock(&inode->i_lock);
flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
sizeof(struct ceph_filelock), GFP_NOFS);
if (!flocks) {
err = -ENOMEM;
goto out_free;
}
- spin_lock(&inode->i_lock);
err = ceph_encode_locks_to_buffer(inode, flocks,
num_fcntl_locks,
num_flock_locks);
- spin_unlock(&inode->i_lock);
if (err) {
kfree(flocks);
if (err == -ENOSPC)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 96b7e9b7706d..8c2ca6f62bad 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1109,11 +1109,6 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
return rc;
}
-/* copied from fs/locks.c with a name change */
-#define cifs_for_each_lock(inode, lockp) \
- for (lockp = &inode->i_flock; *lockp != NULL; \
- lockp = &(*lockp)->fl_next)
-
struct lock_to_push {
struct list_head llist;
__u64 offset;
@@ -1128,8 +1123,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
struct inode *inode = cfile->dentry->d_inode;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
- struct file_lock *flock, **before;
- unsigned int count = 0, i = 0;
+ struct file_lock *flock;
+ struct file_lock_context *flctx = inode->i_flctx;
+ unsigned int i;
int rc = 0, xid, type;
struct list_head locks_to_send, *el;
struct lock_to_push *lck, *tmp;
@@ -1137,21 +1133,17 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
xid = get_xid();
- spin_lock(&inode->i_lock);
- cifs_for_each_lock(inode, before) {
- if ((*before)->fl_flags & FL_POSIX)
- count++;
- }
- spin_unlock(&inode->i_lock);
+ if (!flctx)
+ goto out;
INIT_LIST_HEAD(&locks_to_send);
/*
- * Allocating count locks is enough because no FL_POSIX locks can be
- * added to the list while we are holding cinode->lock_sem that
+ * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks
+ * can be added to the list while we are holding cinode->lock_sem that
* protects locking operations of this inode.
*/
- for (; i < count; i++) {
+ for (i = 0; i < flctx->flc_posix_cnt; i++) {
lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
if (!lck) {
rc = -ENOMEM;
@@ -1161,11 +1153,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
}
el = locks_to_send.next;
- spin_lock(&inode->i_lock);
- cifs_for_each_lock(inode, before) {
- flock = *before;
- if ((flock->fl_flags & FL_POSIX) == 0)
- continue;
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
if (el == &locks_to_send) {
/*
* The list ended. We don't have enough allocated
@@ -1185,9 +1174,8 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
lck->length = length;
lck->type = type;
lck->offset = flock->fl_start;
- el = el->next;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
int stored_rc;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e5d3eadf47b1..bed43081720f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
/* fallback to generic here if not in extents fmt */
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return __generic_block_fiemap(inode, fieinfo, start, len,
- ext4_get_block);
+ return generic_block_fiemap(inode, fieinfo, start, len,
+ ext4_get_block);
if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
return -EBADR;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 513c12cf444c..8131be8c0af3 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* we determine this extent as a data or a hole according to whether the
* page cache has data or not.
*/
-static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
- loff_t endoff, loff_t *offset)
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+ int whence,
+ struct ext4_map_blocks *map,
+ loff_t *offset)
{
struct pagevec pvec;
+ unsigned int blkbits;
pgoff_t index;
pgoff_t end;
+ loff_t endoff;
loff_t startoff;
loff_t lastoff;
int found = 0;
+ blkbits = inode->i_sb->s_blocksize_bits;
startoff = *offset;
lastoff = startoff;
-
+ endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
index = startoff >> PAGE_CACHE_SHIFT;
end = endoff >> PAGE_CACHE_SHIFT;
@@ -403,144 +408,147 @@ out:
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
- struct fiemap_extent_info fie;
- struct fiemap_extent ext[2];
- loff_t next;
- int i, ret = 0;
+ struct ext4_map_blocks map;
+ struct extent_status es;
+ ext4_lblk_t start, last, end;
+ loff_t dataoff, isize;
+ int blkbits;
+ int ret = 0;
mutex_lock(&inode->i_mutex);
- if (offset >= inode->i_size) {
+
+ isize = i_size_read(inode);
+ if (offset >= isize) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
- fie.fi_flags = 0;
- fie.fi_extents_max = 2;
- fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
- while (1) {
- mm_segment_t old_fs = get_fs();
-
- fie.fi_extents_mapped = 0;
- memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
-
- set_fs(get_ds());
- ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
- set_fs(old_fs);
- if (ret)
+
+ blkbits = inode->i_sb->s_blocksize_bits;
+ start = offset >> blkbits;
+ last = start;
+ end = isize >> blkbits;
+ dataoff = offset;
+
+ do {
+ map.m_lblk = last;
+ map.m_len = end - last + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+ if (last != start)
+ dataoff = (loff_t)last << blkbits;
break;
+ }
- /* No extents found, EOF */
- if (!fie.fi_extents_mapped) {
- ret = -ENXIO;
+ /*
+ * If there is a delayed extent at this offset,
+ * it is treated as data.
+ */
+ ext4_es_find_delayed_extent_range(inode, last, last, &es);
+ if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+ if (last != start)
+ dataoff = (loff_t)last << blkbits;
break;
}
- for (i = 0; i < fie.fi_extents_mapped; i++) {
- next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
- if (offset < (loff_t)ext[i].fe_logical)
- offset = (loff_t)ext[i].fe_logical;
- /*
- * If extent is not unwritten, then it contains valid
- * data, mapped or delayed.
- */
- if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
- goto out;
+ /*
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or a hole according to
+ * whether the page cache has data.
+ */
+ if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ int unwritten;
+ unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+ &map, &dataoff);
+ if (unwritten)
+ break;
+ }
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
- next, &offset))
- goto out;
+ last++;
+ dataoff = (loff_t)last << blkbits;
+ } while (last <= end);
- if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
- ret = -ENXIO;
- goto out;
- }
- offset = next;
- }
- }
- if (offset > inode->i_size)
- offset = inode->i_size;
-out:
mutex_unlock(&inode->i_mutex);
- if (ret)
- return ret;
- return vfs_setpos(file, offset, maxsize);
+ if (dataoff > isize)
+ return -ENXIO;
+
+ return vfs_setpos(file, dataoff, maxsize);
}
/*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
*/
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
- struct fiemap_extent_info fie;
- struct fiemap_extent ext[2];
- loff_t next;
- int i, ret = 0;
+ struct ext4_map_blocks map;
+ struct extent_status es;
+ ext4_lblk_t start, last, end;
+ loff_t holeoff, isize;
+ int blkbits;
+ int ret = 0;
mutex_lock(&inode->i_mutex);
- if (offset >= inode->i_size) {
+
+ isize = i_size_read(inode);
+ if (offset >= isize) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
- fie.fi_flags = 0;
- fie.fi_extents_max = 2;
- fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
- while (1) {
- mm_segment_t old_fs = get_fs();
-
- fie.fi_extents_mapped = 0;
- memset(ext, 0, sizeof(*ext));
+ blkbits = inode->i_sb->s_blocksize_bits;
+ start = offset >> blkbits;
+ last = start;
+ end = isize >> blkbits;
+ holeoff = offset;
- set_fs(get_ds());
- ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
- set_fs(old_fs);
- if (ret)
- break;
+ do {
+ map.m_lblk = last;
+ map.m_len = end - last + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+ last += ret;
+ holeoff = (loff_t)last << blkbits;
+ continue;
+ }
- /* No extents found */
- if (!fie.fi_extents_mapped)
- break;
+ /*
+ * If there is a delayed extent at this offset,
+ * we will skip this extent.
+ */
+ ext4_es_find_delayed_extent_range(inode, last, last, &es);
+ if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+ last = es.es_lblk + es.es_len;
+ holeoff = (loff_t)last << blkbits;
+ continue;
+ }
- for (i = 0; i < fie.fi_extents_mapped; i++) {
- next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
- /*
- * If extent is not unwritten, then it contains valid
- * data, mapped or delayed.
- */
- if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
- if (offset < (loff_t)ext[i].fe_logical)
- goto out;
- offset = next;
+ /*
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or a hole according to
+ * whether the page cache has data.
+ */
+ if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ int unwritten;
+ unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+ &map, &holeoff);
+ if (!unwritten) {
+ last += ret;
+ holeoff = (loff_t)last << blkbits;
continue;
}
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
- next, &offset))
- goto out;
-
- offset = next;
- if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
- goto out;
}
- }
- if (offset > inode->i_size)
- offset = inode->i_size;
-out:
+
+ /* find a hole */
+ break;
+ } while (last <= end);
+
mutex_unlock(&inode->i_mutex);
- if (ret)
- return ret;
- return vfs_setpos(file, offset, maxsize);
+ if (holeoff > isize)
+ holeoff = isize;
+
+ return vfs_setpos(file, holeoff, maxsize);
}
/*
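
The semantics the rewritten seek loops implement are observable from user space via lseek(SEEK_DATA)/lseek(SEEK_HOLE). A minimal sketch, assuming a filesystem with sparse-file support (illustration only, not part of the patch):

#define _GNU_SOURCE		/* SEEK_DATA, SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	off_t data, hole, end;
	int fd = open("sparse.tmp", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* one byte of data at offset 0 and at 1M, with a hole between */
	if (pwrite(fd, "x", 1, 0) != 1 ||
	    pwrite(fd, "x", 1, 1024 * 1024) != 1)
		return 1;
	end = lseek(fd, 0, SEEK_END);

	for (data = 0; data < end; data = hole) {
		data = lseek(fd, data, SEEK_DATA);
		if (data < 0)
			break;		/* ENXIO: no data past here */
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data segment: [%lld, %lld)\n",
		       (long long)data, (long long)hole);
	}
	close(fd);
	unlink("sparse.tmp");
	return 0;
}
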
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index bf76f405a5f9..8a8ec6293b19 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb)
return -EPERM;
/*
+ * If we are not using the primary superblock/GDT copy don't resize,
+ * because the user tools have no way of handling this. Probably a
+ * bad time to do it anyways.
+ */
+ if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+ ext4_warning(sb, "won't resize using backup superblock at %llu",
+ (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+ return -EPERM;
+ }
+
+ /*
* We are not allowed to do online-resizing on a filesystem mounted
* with error, because it can destroy the filesystem easily.
*/
@@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
"EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
gdb_num);
- /*
- * If we are not using the primary superblock/GDT copy don't resize,
- * because the user tools have no way of handling this. Probably a
- * bad time to do it anyways.
- */
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
- ext4_warning(sb, "won't resize using backup superblock at %llu",
- (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
- return -EPERM;
- }
-
gdb_bh = sb_bread(sb, gdblock);
if (!gdb_bh)
return -EIO;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 43c92b1685cb..74c5f53595fb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
- ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+ ext4_warning(sb, "metadata_csum and uninit_bg are "
"redundant flags; please run fsck.");
/* Check for a known checksum algorithm */
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 99d440a4a6ba..ee85cd4e136a 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -740,14 +740,15 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH | __O_TMPFILE
+ __FMODE_EXEC | O_PATH | __O_TMPFILE |
+ __FMODE_NONOTIFY
));
fasync_cache = kmem_cache_create("fasync_cache",
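
The HWEIGHT32 check counts the set bits in the OR of all the flags, so it fails the build if two flags share a bit or one flag spans two; adding __FMODE_NONOTIFY is what bumps the expected count from 20 to 21. A user-space sketch of the same invariant with hypothetical flags (the kernel enforces it at compile time via BUILD_BUG_ON):

#include <assert.h>

/* hypothetical single-bit flags standing in for the O_* constants */
#define FLAG_A 0x01
#define FLAG_B 0x02
#define FLAG_C 0x04
#define NFLAGS 3

int main(void)
{
	/*
	 * If every flag is a distinct single bit, the popcount of their
	 * OR equals the number of flags; a shared or multi-bit flag
	 * breaks the equality.
	 */
	assert(NFLAGS == __builtin_popcount(FLAG_A | FLAG_B | FLAG_C));
	return 0;
}
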
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ba1107977f2e..ed19a7d622fa 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req)
req->in.h.pid = current->pid;
}
+void fuse_set_initialized(struct fuse_conn *fc)
+{
+ /* Make sure stores before this are seen on another CPU */
+ smp_wmb();
+ fc->initialized = 1;
+}
+
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
return !fc->initialized || (for_background && fc->blocked);
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
if (intr)
goto out;
}
+ /* Matches smp_wmb() in fuse_set_initialized() */
+ smp_rmb();
err = -ENOTCONN;
if (!fc->connected)
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
atomic_inc(&fc->num_waiting);
wait_event(fc->blocked_waitq, fc->initialized);
+ /* Matches smp_wmb() in fuse_set_initialized() */
+ smp_rmb();
req = fuse_request_alloc(0);
if (!req)
req = get_reserved_req(fc, file);
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
}
EXPORT_SYMBOL_GPL(fuse_request_send);
+static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
+{
+ if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
+ args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+
+ if (fc->minor < 9) {
+ switch (args->in.h.opcode) {
+ case FUSE_LOOKUP:
+ case FUSE_CREATE:
+ case FUSE_MKNOD:
+ case FUSE_MKDIR:
+ case FUSE_SYMLINK:
+ case FUSE_LINK:
+ args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ break;
+ case FUSE_GETATTR:
+ case FUSE_SETATTR:
+ args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ break;
+ }
+ }
+ if (fc->minor < 12) {
+ switch (args->in.h.opcode) {
+ case FUSE_CREATE:
+ args->in.args[0].size = sizeof(struct fuse_open_in);
+ break;
+ case FUSE_MKNOD:
+ args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+ break;
+ }
+ }
+}
+
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
struct fuse_req *req;
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
if (IS_ERR(req))
return PTR_ERR(req);
+ /* Needs to be done after fuse_get_req() so that fc->minor is valid */
+ fuse_adjust_compat(fc, args);
+
req->in.h.opcode = args->in.h.opcode;
req->in.h.nodeid = args->in.h.nodeid;
req->in.numargs = args->in.numargs;
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
if (fc->connected) {
fc->connected = 0;
fc->blocked = 0;
- fc->initialized = 1;
+ fuse_set_initialized(fc);
end_io_requests(fc);
end_queued_requests(fc);
end_polls(fc);
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
spin_lock(&fc->lock);
fc->connected = 0;
fc->blocked = 0;
- fc->initialized = 1;
+ fuse_set_initialized(fc);
end_queued_requests(fc);
end_polls(fc);
wake_up_all(&fc->blocked_waitq);
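
fuse_adjust_compat() centralizes the protocol-minor checks that were previously scattered across callers. A sketch of the general shape, with hypothetical opcodes and sizes (illustration only):

#include <stddef.h>
#include <stdio.h>

struct compat_rule {
	int max_minor;	/* applies when the connection minor < this */
	int opcode;	/* hypothetical opcode */
	size_t size;	/* truncated reply size to use */
};

enum { OP_LOOKUP = 1, OP_GETATTR = 2 };

static const struct compat_rule rules[] = {
	{ 9, OP_LOOKUP,  120 },	/* like FUSE_COMPAT_ENTRY_OUT_SIZE */
	{ 9, OP_GETATTR,  96 },	/* like FUSE_COMPAT_ATTR_OUT_SIZE */
};

static size_t reply_size(int minor, int opcode, size_t full)
{
	size_t i;

	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
		if (minor < rules[i].max_minor && opcode == rules[i].opcode)
			return rules[i].size;
	return full;		/* modern peer: full-size reply */
}

int main(void)
{
	printf("minor 7 lookup reply: %zu bytes\n", reply_size(7, OP_LOOKUP, 144));
	printf("minor 12 lookup reply: %zu bytes\n", reply_size(12, OP_LOOKUP, 144));
	return 0;
}
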
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 252b8a5de8b5..08e7b1a9d5d0 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
args->in.args[0].size = name->len + 1;
args->in.args[0].value = name->name;
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(struct fuse_entry_out);
+ args->out.args[0].size = sizeof(struct fuse_entry_out);
args->out.args[0].value = outarg;
}
@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
args.in.h.opcode = FUSE_CREATE;
args.in.h.nodeid = get_node_id(dir);
args.in.numargs = 2;
- args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
- sizeof(inarg);
+ args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.in.args[1].size = entry->d_name.len + 1;
args.in.args[1].value = entry->d_name.name;
args.out.numargs = 2;
- if (fc->minor < 9)
- args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args.out.args[0].size = sizeof(outentry);
+ args.out.args[0].size = sizeof(outentry);
args.out.args[0].value = &outentry;
args.out.args[1].size = sizeof(outopen);
args.out.args[1].value = &outopen;
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
memset(&outarg, 0, sizeof(outarg));
args->in.h.nodeid = get_node_id(dir);
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(outarg);
+ args->out.args[0].size = sizeof(outarg);
args->out.args[0].value = &outarg;
err = fuse_simple_request(fc, args);
if (err)
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
inarg.umask = current_umask();
args.in.h.opcode = FUSE_MKNOD;
args.in.numargs = 2;
- args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
- sizeof(inarg);
+ args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.in.args[1].size = entry->d_name.len + 1;
args.in.args[1].value = entry->d_name.name;
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.out.numargs = 1;
- if (fc->minor < 9)
- args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
- else
- args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err) {
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
args->in.args[0].size = sizeof(*inarg_p);
args->in.args[0].value = inarg_p;
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(*outarg_p);
+ args->out.args[0].size = sizeof(*outarg_p);
args->out.args[0].value = outarg_p;
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e0fc6725d1d0..1cdfb07c1376 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
int fuse_do_setattr(struct inode *inode, struct iattr *attr,
struct file *file);
+void fuse_set_initialized(struct fuse_conn *fc);
+
#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6749109f255d..f38256e4476e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
args.in.h.opcode = FUSE_STATFS;
args.in.h.nodeid = get_node_id(dentry->d_inode);
args.out.numargs = 1;
- args.out.args[0].size =
- fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
+ args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
@@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->max_write = max_t(unsigned, 4096, fc->max_write);
fc->conn_init = 1;
}
- fc->initialized = 1;
+ fuse_set_initialized(fc);
wake_up_all(&fc->blocked_waitq);
}
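
The smp_wmb() in fuse_set_initialized() pairs with the smp_rmb() at each reader: stores to connection state made before setting fc->initialized are guaranteed visible to anyone who observes the flag. A user-space analogue using C11 fences (illustration only; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* ordinary data, like fc->max_write */
static atomic_int initialized;		/* like fc->initialized */

static void *writer(void *arg)
{
	payload = 42;					/* stores before the flag... */
	atomic_thread_fence(memory_order_release);	/* ...ordered like smp_wmb() */
	atomic_store_explicit(&initialized, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	while (!atomic_load_explicit(&initialized, memory_order_relaxed))
		;					/* spin until flag observed */
	atomic_thread_fence(memory_order_acquire);	/* like smp_rmb() */
	printf("payload=%d\n", payload);		/* guaranteed to see 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
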
diff --git a/fs/inode.c b/fs/inode.c
index aa149e7262ac..f30872ade6d7 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -194,7 +194,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
#ifdef CONFIG_FSNOTIFY
inode->i_fsnotify_mask = 0;
#endif
-
+ inode->i_flctx = NULL;
this_cpu_inc(nr_inodes);
return 0;
@@ -237,6 +237,7 @@ void __destroy_inode(struct inode *inode)
BUG_ON(inode_has_buffers(inode));
security_inode_free(inode);
fsnotify_inode_delete(inode);
+ locks_free_lock_context(inode->i_flctx);
if (!inode->i_nlink) {
WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
atomic_long_dec(&inode->i_sb->s_remove_count);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index d12ff4e2dbe7..665ef5a05183 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -164,12 +164,15 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
{
struct inode *inode = nlmsvc_file_inode(file);
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
struct nlm_host *lockhost;
+ if (!flctx || list_empty_careful(&flctx->flc_posix))
+ return 0;
again:
file->f_locks = 0;
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl; fl = fl->fl_next) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
if (fl->fl_lmops != &nlmsvc_lock_operations)
continue;
@@ -180,7 +183,7 @@ again:
if (match(lockhost, host)) {
struct file_lock lock = *fl;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
lock.fl_type = F_UNLCK;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
@@ -192,7 +195,7 @@ again:
goto again;
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
return 0;
}
@@ -223,18 +226,21 @@ nlm_file_inuse(struct nlm_file *file)
{
struct inode *inode = nlmsvc_file_inode(file);
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
return 1;
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl; fl = fl->fl_next) {
- if (fl->fl_lmops == &nlmsvc_lock_operations) {
- spin_unlock(&inode->i_lock);
- return 1;
+ if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+ if (fl->fl_lmops == &nlmsvc_lock_operations) {
+ spin_unlock(&flctx->flc_lock);
+ return 1;
+ }
}
+ spin_unlock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
file->f_locks = 0;
return 0;
}
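
nlm_traverse_locks() keeps its pre-existing pattern of dropping the lock to do blocking work and restarting the walk from scratch, now under flc_lock. A minimal sketch of that pattern (illustration only; the node list and done flag are hypothetical; compile with -pthread):

#include <pthread.h>
#include <stdio.h>

struct node { int val; int done; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node c = { 3, 0, NULL }, b = { 2, 0, &c }, a = { 1, 0, &b };
static struct node *head = &a;

static void blocking_work(struct node *n)
{
	printf("processing %d outside the lock\n", n->val);
}

static void traverse(void)
{
	struct node *n;
again:
	pthread_mutex_lock(&list_lock);
	for (n = head; n; n = n->next) {
		if (n->done)
			continue;
		n->done = 1;
		pthread_mutex_unlock(&list_lock);
		blocking_work(n);	/* may sleep; iterator is now stale */
		goto again;		/* restart from a fresh head */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	traverse();
	return 0;
}
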
diff --git a/fs/locks.c b/fs/locks.c
index 735b8d3fa78c..4d0d41163a50 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -157,14 +157,11 @@ static int target_leasetype(struct file_lock *fl)
int leases_enable = 1;
int lease_break_time = 45;
-#define for_each_lock(inode, lockp) \
- for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
-
/*
* The global file_lock_list is only used for displaying /proc/locks, so we
* keep a list on each CPU, with each list protected by its own spinlock via
* the file_lock_lglock. Note that alterations to the list also require that
- * the relevant i_lock is held.
+ * the relevant flc_lock is held.
*/
DEFINE_STATIC_LGLOCK(file_lock_lglock);
static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
@@ -192,21 +189,68 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
* contrast to those that are acting as records of acquired locks).
*
* Note that when we acquire this lock in order to change the above fields,
- * we often hold the i_lock as well. In certain cases, when reading the fields
+ * we often hold the flc_lock as well. In certain cases, when reading the fields
* protected by this lock, we can skip acquiring it iff we already hold the
- * i_lock.
+ * flc_lock.
*
* In particular, adding an entry to the fl_block list requires that you hold
- * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
- * an entry from the list however only requires the file_lock_lock.
+ * both the flc_lock and the blocked_lock_lock (acquired in that order).
+ * Deleting an entry from the list however only requires the file_lock_lock.
*/
static DEFINE_SPINLOCK(blocked_lock_lock);
+static struct kmem_cache *flctx_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
+static struct file_lock_context *
+locks_get_lock_context(struct inode *inode)
+{
+ struct file_lock_context *new;
+
+ if (likely(inode->i_flctx))
+ goto out;
+
+ new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
+ if (!new)
+ goto out;
+
+ spin_lock_init(&new->flc_lock);
+ INIT_LIST_HEAD(&new->flc_flock);
+ INIT_LIST_HEAD(&new->flc_posix);
+ INIT_LIST_HEAD(&new->flc_lease);
+
+ /*
+ * Assign the pointer if it's not already assigned. If it is, then
+ * free the context we just allocated.
+ */
+ spin_lock(&inode->i_lock);
+ if (likely(!inode->i_flctx)) {
+ inode->i_flctx = new;
+ new = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (new)
+ kmem_cache_free(flctx_cache, new);
+out:
+ return inode->i_flctx;
+}
+
+void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+ if (ctx) {
+ WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
+ WARN_ON_ONCE(!list_empty(&ctx->flc_posix));
+ WARN_ON_ONCE(!list_empty(&ctx->flc_lease));
+ kmem_cache_free(flctx_cache, ctx);
+ }
+}
+
static void locks_init_lock_heads(struct file_lock *fl)
{
INIT_HLIST_NODE(&fl->fl_link);
+ INIT_LIST_HEAD(&fl->fl_list);
INIT_LIST_HEAD(&fl->fl_block);
init_waitqueue_head(&fl->fl_wait);
}
@@ -243,6 +287,7 @@ EXPORT_SYMBOL_GPL(locks_release_private);
void locks_free_lock(struct file_lock *fl)
{
BUG_ON(waitqueue_active(&fl->fl_wait));
+ BUG_ON(!list_empty(&fl->fl_list));
BUG_ON(!list_empty(&fl->fl_block));
BUG_ON(!hlist_unhashed(&fl->fl_link));
@@ -257,8 +302,8 @@ locks_dispose_list(struct list_head *dispose)
struct file_lock *fl;
while (!list_empty(dispose)) {
- fl = list_first_entry(dispose, struct file_lock, fl_block);
- list_del_init(&fl->fl_block);
+ fl = list_first_entry(dispose, struct file_lock, fl_list);
+ list_del_init(&fl->fl_list);
locks_free_lock(fl);
}
}
@@ -513,7 +558,7 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
return fl1->fl_owner == fl2->fl_owner;
}
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
static void locks_insert_global_locks(struct file_lock *fl)
{
lg_local_lock(&file_lock_lglock);
@@ -522,12 +567,12 @@ static void locks_insert_global_locks(struct file_lock *fl)
lg_local_unlock(&file_lock_lglock);
}
-/* Must be called with the i_lock held! */
+/* Must be called with the flc_lock held! */
static void locks_delete_global_locks(struct file_lock *fl)
{
/*
* Avoid taking lock if already unhashed. This is safe since this check
- * is done while holding the i_lock, and new insertions into the list
+ * is done while holding the flc_lock, and new insertions into the list
* also require that it be held.
*/
if (hlist_unhashed(&fl->fl_link))
@@ -579,10 +624,10 @@ static void locks_delete_block(struct file_lock *waiter)
* the order they blocked. The documentation doesn't require this but
* it seems like the reasonable thing to do.
*
- * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
- * list itself is protected by the blocked_lock_lock, but by ensuring that the
- * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
- * in some cases when we see that the fl_block list is empty.
+ * Must be called with both the flc_lock and blocked_lock_lock held. The
+ * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
+ * that the flc_lock is also held on insertions we can avoid taking the
+ * blocked_lock_lock in some cases when we see that the fl_block list is empty.
*/
static void __locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
@@ -594,7 +639,7 @@ static void __locks_insert_block(struct file_lock *blocker,
locks_insert_global_blocked(waiter);
}
-/* Must be called with i_lock held. */
+/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
struct file_lock *waiter)
{
@@ -606,15 +651,15 @@ static void locks_insert_block(struct file_lock *blocker,
/*
* Wake up processes blocked waiting for blocker.
*
- * Must be called with the inode->i_lock held!
+ * Must be called with the inode->flc_lock held!
*/
static void locks_wake_up_blocks(struct file_lock *blocker)
{
/*
* Avoid taking global lock if list is empty. This is safe since new
- * blocked requests are only added to the list under the i_lock, and
- * the i_lock is always held here. Note that removal from the fl_block
- * list does not require the i_lock, so we must recheck list_empty()
+ * blocked requests are only added to the list under the flc_lock, and
+ * the flc_lock is always held here. Note that removal from the fl_block
+ * list does not require the flc_lock, so we must recheck list_empty()
* after acquiring the blocked_lock_lock.
*/
if (list_empty(&blocker->fl_block))
@@ -635,63 +680,36 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
spin_unlock(&blocked_lock_lock);
}
-/* Insert file lock fl into an inode's lock list at the position indicated
- * by pos. At the same time add the lock to the global file lock list.
- *
- * Must be called with the i_lock held!
- */
-static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
+static void
+locks_insert_lock_ctx(struct file_lock *fl, int *counter,
+ struct list_head *before)
{
fl->fl_nspid = get_pid(task_tgid(current));
-
- /* insert into file's list */
- fl->fl_next = *pos;
- *pos = fl;
-
+ list_add_tail(&fl->fl_list, before);
+ ++*counter;
locks_insert_global_locks(fl);
}
-/**
- * locks_delete_lock - Delete a lock and then free it.
- * @thisfl_p: pointer that points to the fl_next field of the previous
- * inode->i_flock list entry
- *
- * Unlink a lock from all lists and free the namespace reference, but don't
- * free it yet. Wake up processes that are blocked waiting for this lock and
- * notify the FS that the lock has been cleared.
- *
- * Must be called with the i_lock held!
- */
-static void locks_unlink_lock(struct file_lock **thisfl_p)
+static void
+locks_unlink_lock_ctx(struct file_lock *fl, int *counter)
{
- struct file_lock *fl = *thisfl_p;
-
locks_delete_global_locks(fl);
-
- *thisfl_p = fl->fl_next;
- fl->fl_next = NULL;
-
+ list_del_init(&fl->fl_list);
+ --*counter;
if (fl->fl_nspid) {
put_pid(fl->fl_nspid);
fl->fl_nspid = NULL;
}
-
locks_wake_up_blocks(fl);
}
-/*
- * Unlink a lock from all lists and free it.
- *
- * Must be called with i_lock held!
- */
-static void locks_delete_lock(struct file_lock **thisfl_p,
- struct list_head *dispose)
+static void
+locks_delete_lock_ctx(struct file_lock *fl, int *counter,
+ struct list_head *dispose)
{
- struct file_lock *fl = *thisfl_p;
-
- locks_unlink_lock(thisfl_p);
+ locks_unlink_lock_ctx(fl, counter);
if (dispose)
- list_add(&fl->fl_block, dispose);
+ list_add(&fl->fl_list, dispose);
else
locks_free_lock(fl);
}
@@ -746,22 +764,27 @@ void
posix_test_lock(struct file *filp, struct file_lock *fl)
{
struct file_lock *cfl;
+ struct file_lock_context *ctx;
struct inode *inode = file_inode(filp);
- spin_lock(&inode->i_lock);
- for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
- if (!IS_POSIX(cfl))
- continue;
- if (posix_locks_conflict(fl, cfl))
- break;
- }
- if (cfl) {
- locks_copy_conflock(fl, cfl);
- if (cfl->fl_nspid)
- fl->fl_pid = pid_vnr(cfl->fl_nspid);
- } else
+ ctx = inode->i_flctx;
+ if (!ctx || list_empty_careful(&ctx->flc_posix)) {
fl->fl_type = F_UNLCK;
- spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
+ if (posix_locks_conflict(fl, cfl)) {
+ locks_copy_conflock(fl, cfl);
+ if (cfl->fl_nspid)
+ fl->fl_pid = pid_vnr(cfl->fl_nspid);
+ goto out;
+ }
+ }
+ fl->fl_type = F_UNLCK;
+out:
+ spin_unlock(&ctx->flc_lock);
return;
}
EXPORT_SYMBOL(posix_test_lock);
@@ -845,34 +868,34 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
static int flock_lock_file(struct file *filp, struct file_lock *request)
{
struct file_lock *new_fl = NULL;
- struct file_lock **before;
- struct inode * inode = file_inode(filp);
+ struct file_lock *fl;
+ struct file_lock_context *ctx;
+ struct inode *inode = file_inode(filp);
int error = 0;
- int found = 0;
+ bool found = false;
LIST_HEAD(dispose);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
new_fl = locks_alloc_lock();
if (!new_fl)
return -ENOMEM;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
- for_each_lock(inode, before) {
- struct file_lock *fl = *before;
- if (IS_POSIX(fl))
- break;
- if (IS_LEASE(fl))
- continue;
+ list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
if (filp != fl->fl_file)
continue;
if (request->fl_type == fl->fl_type)
goto out;
- found = 1;
- locks_delete_lock(before, &dispose);
+ found = true;
+ locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose);
break;
}
@@ -887,18 +910,13 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
* give it the opportunity to lock the file.
*/
if (found) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
cond_resched();
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
}
find_conflict:
- for_each_lock(inode, before) {
- struct file_lock *fl = *before;
- if (IS_POSIX(fl))
- break;
- if (IS_LEASE(fl))
- continue;
+ list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
if (!flock_locks_conflict(request, fl))
continue;
error = -EAGAIN;
@@ -911,12 +929,12 @@ find_conflict:
if (request->fl_flags & FL_ACCESS)
goto out;
locks_copy_lock(new_fl, request);
- locks_insert_lock(before, new_fl);
+ locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock);
new_fl = NULL;
error = 0;
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
@@ -925,16 +943,20 @@ out:
static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
{
- struct file_lock *fl;
+ struct file_lock *fl, *tmp;
struct file_lock *new_fl = NULL;
struct file_lock *new_fl2 = NULL;
struct file_lock *left = NULL;
struct file_lock *right = NULL;
- struct file_lock **before;
+ struct file_lock_context *ctx;
int error;
bool added = false;
LIST_HEAD(dispose);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
/*
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
@@ -948,15 +970,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
new_fl2 = locks_alloc_lock();
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
/*
* New lock request. Walk all POSIX locks and look for conflicts. If
* there are any, either return error or put the request on the
* blocker's list of waiters and the global blocked_hash.
*/
if (request->fl_type != F_UNLCK) {
- for_each_lock(inode, before) {
- fl = *before;
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
if (!IS_POSIX(fl))
continue;
if (!posix_locks_conflict(request, fl))
@@ -986,29 +1007,25 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
if (request->fl_flags & FL_ACCESS)
goto out;
- /*
- * Find the first old lock with the same owner as the new lock.
- */
-
- before = &inode->i_flock;
-
- /* First skip locks owned by other processes. */
- while ((fl = *before) && (!IS_POSIX(fl) ||
- !posix_same_owner(request, fl))) {
- before = &fl->fl_next;
+ /* Find the first old lock with the same owner as the new lock */
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
+ if (posix_same_owner(request, fl))
+ break;
}
/* Process locks with this owner. */
- while ((fl = *before) && posix_same_owner(request, fl)) {
- /* Detect adjacent or overlapping regions (if same lock type)
- */
+ list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
+ if (!posix_same_owner(request, fl))
+ break;
+
+ /* Detect adjacent or overlapping regions (if same lock type) */
if (request->fl_type == fl->fl_type) {
/* In all comparisons of start vs end, use
* "start - 1" rather than "end + 1". If end
* is OFFSET_MAX, end + 1 will become negative.
*/
if (fl->fl_end < request->fl_start - 1)
- goto next_lock;
+ continue;
/* If the next lock in the list has entirely bigger
* addresses than the new one, insert the lock here.
*/
@@ -1029,18 +1046,18 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
else
request->fl_end = fl->fl_end;
if (added) {
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt,
+ &dispose);
continue;
}
request = fl;
added = true;
- }
- else {
+ } else {
/* Processing for different lock types is a bit
* more complex.
*/
if (fl->fl_end < request->fl_start)
- goto next_lock;
+ continue;
if (fl->fl_start > request->fl_end)
break;
if (request->fl_type == F_UNLCK)
@@ -1059,7 +1076,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
* one (This may happen several times).
*/
if (added) {
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl,
+ &ctx->flc_posix_cnt, &dispose);
continue;
}
/*
@@ -1075,15 +1093,13 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
locks_copy_lock(new_fl, request);
request = new_fl;
new_fl = NULL;
- locks_delete_lock(before, &dispose);
- locks_insert_lock(before, request);
+ locks_insert_lock_ctx(request,
+ &ctx->flc_posix_cnt, &fl->fl_list);
+ locks_delete_lock_ctx(fl,
+ &ctx->flc_posix_cnt, &dispose);
added = true;
}
}
- /* Go on to next lock.
- */
- next_lock:
- before = &fl->fl_next;
}
/*
@@ -1108,7 +1124,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
goto out;
}
locks_copy_lock(new_fl, request);
- locks_insert_lock(before, new_fl);
+ locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt,
+ &fl->fl_list);
new_fl = NULL;
}
if (right) {
@@ -1119,7 +1136,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
left = new_fl2;
new_fl2 = NULL;
locks_copy_lock(left, right);
- locks_insert_lock(before, left);
+ locks_insert_lock_ctx(left, &ctx->flc_posix_cnt,
+ &fl->fl_list);
}
right->fl_start = request->fl_end + 1;
locks_wake_up_blocks(right);
@@ -1129,7 +1147,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
locks_wake_up_blocks(left);
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
/*
* Free any unused locks.
*/
@@ -1199,22 +1217,29 @@ EXPORT_SYMBOL(posix_lock_file_wait);
*/
int locks_mandatory_locked(struct file *file)
{
+ int ret;
struct inode *inode = file_inode(file);
+ struct file_lock_context *ctx;
struct file_lock *fl;
+ ctx = inode->i_flctx;
+ if (!ctx || list_empty_careful(&ctx->flc_posix))
+ return 0;
+
/*
* Search the lock list for this inode for any POSIX locks.
*/
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!IS_POSIX(fl))
- continue;
+ spin_lock(&ctx->flc_lock);
+ ret = 0;
+ list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
if (fl->fl_owner != current->files &&
- fl->fl_owner != file)
+ fl->fl_owner != file) {
+ ret = -EAGAIN;
break;
+ }
}
- spin_unlock(&inode->i_lock);
- return fl ? -EAGAIN : 0;
+ spin_unlock(&ctx->flc_lock);
+ return ret;
}
/**
@@ -1294,9 +1319,9 @@ static void lease_clear_pending(struct file_lock *fl, int arg)
}
/* We already had a lease on this file; just change its type */
-int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
+int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose)
{
- struct file_lock *fl = *before;
+ struct file_lock_context *flctx;
int error = assign_type(fl, arg);
if (error)
@@ -1306,6 +1331,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
if (arg == F_UNLCK) {
struct file *filp = fl->fl_file;
+ flctx = file_inode(filp)->i_flctx;
f_delown(filp);
filp->f_owner.signum = 0;
fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
@@ -1313,7 +1339,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose)
printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
fl->fl_fasync = NULL;
}
- locks_delete_lock(before, dispose);
+ locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose);
}
return 0;
}
@@ -1329,20 +1355,17 @@ static bool past_time(unsigned long then)
static void time_out_leases(struct inode *inode, struct list_head *dispose)
{
- struct file_lock **before;
- struct file_lock *fl;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl, *tmp;
- lockdep_assert_held(&inode->i_lock);
+ lockdep_assert_held(&ctx->flc_lock);
- before = &inode->i_flock;
- while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) {
trace_time_out_leases(inode, fl);
if (past_time(fl->fl_downgrade_time))
- lease_modify(before, F_RDLCK, dispose);
+ lease_modify(fl, F_RDLCK, dispose);
if (past_time(fl->fl_break_time))
- lease_modify(before, F_UNLCK, dispose);
- if (fl == *before) /* lease_modify may have freed fl */
- before = &fl->fl_next;
+ lease_modify(fl, F_UNLCK, dispose);
}
}
@@ -1356,11 +1379,12 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
static bool
any_leases_conflict(struct inode *inode, struct file_lock *breaker)
{
+ struct file_lock_context *ctx = inode->i_flctx;
struct file_lock *fl;
- lockdep_assert_held(&inode->i_lock);
+ lockdep_assert_held(&ctx->flc_lock);
- for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) {
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (leases_conflict(fl, breaker))
return true;
}
@@ -1384,7 +1408,8 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
{
int error = 0;
struct file_lock *new_fl;
- struct file_lock *fl, **before;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl;
unsigned long break_time;
int want_write = (mode & O_ACCMODE) != O_RDONLY;
LIST_HEAD(dispose);
@@ -1394,7 +1419,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
return PTR_ERR(new_fl);
new_fl->fl_flags = type;
- spin_lock(&inode->i_lock);
+ /* callers are normally expected to check that ctx is non-NULL */
+ if (!ctx) {
+ WARN_ON_ONCE(1);
+ return error;
+ }
+
+ spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
@@ -1408,9 +1439,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
break_time++; /* so that 0 means no break time */
}
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (!leases_conflict(fl, new_fl))
continue;
if (want_write) {
@@ -1419,17 +1448,17 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
fl->fl_flags |= FL_UNLOCK_PENDING;
fl->fl_break_time = break_time;
} else {
- if (lease_breaking(inode->i_flock))
+ if (lease_breaking(fl))
continue;
fl->fl_flags |= FL_DOWNGRADE_PENDING;
fl->fl_downgrade_time = break_time;
}
if (fl->fl_lmops->lm_break(fl))
- locks_delete_lock(before, &dispose);
+ locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt,
+ &dispose);
}
- fl = inode->i_flock;
- if (!fl || !IS_LEASE(fl))
+ if (list_empty(&ctx->flc_lease))
goto out;
if (mode & O_NONBLOCK) {
@@ -1439,18 +1468,19 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
}
restart:
- break_time = inode->i_flock->fl_break_time;
+ fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list);
+ break_time = fl->fl_break_time;
if (break_time != 0)
break_time -= jiffies;
if (break_time == 0)
break_time++;
- locks_insert_block(inode->i_flock, new_fl);
+ locks_insert_block(fl, new_fl);
trace_break_lease_block(inode, new_fl);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
!new_fl->fl_next, break_time);
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
if (error >= 0) {
@@ -1462,12 +1492,10 @@ restart:
time_out_leases(inode, &dispose);
if (any_leases_conflict(inode, new_fl))
goto restart;
-
error = 0;
}
-
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
locks_free_lock(new_fl);
return error;
@@ -1487,14 +1515,18 @@ EXPORT_SYMBOL(__break_lease);
void lease_get_mtime(struct inode *inode, struct timespec *time)
{
bool has_lease = false;
- struct file_lock *flock;
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl;
- if (inode->i_flock) {
- spin_lock(&inode->i_lock);
- flock = inode->i_flock;
- if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
- has_lease = true;
- spin_unlock(&inode->i_lock);
+ if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ spin_lock(&ctx->flc_lock);
+ if (!list_empty(&ctx->flc_lease)) {
+ fl = list_first_entry(&ctx->flc_lease,
+ struct file_lock, fl_list);
+ if (fl->fl_type == F_WRLCK)
+ has_lease = true;
+ }
+ spin_unlock(&ctx->flc_lock);
}
if (has_lease)
@@ -1532,20 +1564,22 @@ int fcntl_getlease(struct file *filp)
{
struct file_lock *fl;
struct inode *inode = file_inode(filp);
+ struct file_lock_context *ctx = inode->i_flctx;
int type = F_UNLCK;
LIST_HEAD(dispose);
- spin_lock(&inode->i_lock);
- time_out_leases(file_inode(filp), &dispose);
- for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
- fl = fl->fl_next) {
- if (fl->fl_file == filp) {
+ if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ spin_lock(&ctx->flc_lock);
+ time_out_leases(file_inode(filp), &dispose);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file != filp)
+ continue;
type = target_leasetype(fl);
break;
}
+ spin_unlock(&ctx->flc_lock);
+ locks_dispose_list(&dispose);
}
- spin_unlock(&inode->i_lock);
- locks_dispose_list(&dispose);
return type;
}
@@ -1578,9 +1612,10 @@ check_conflicting_open(const struct dentry *dentry, const long arg)
static int
generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
{
- struct file_lock *fl, **before, **my_before = NULL, *lease;
+ struct file_lock *fl, *my_fl = NULL, *lease;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ struct file_lock_context *ctx;
bool is_deleg = (*flp)->fl_flags & FL_DELEG;
int error;
LIST_HEAD(dispose);
@@ -1588,6 +1623,10 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
lease = *flp;
trace_generic_add_lease(inode, lease);
+ ctx = locks_get_lock_context(inode);
+ if (!ctx)
+ return -ENOMEM;
+
/*
* In the delegation case we need mutual exclusion with
* a number of operations that take the i_mutex. We trylock
@@ -1606,7 +1645,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
return -EINVAL;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
error = check_conflicting_open(dentry, arg);
if (error)
@@ -1621,13 +1660,12 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
* except for this filp.
*/
error = -EAGAIN;
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (fl->fl_file == filp) {
- my_before = before;
+ my_fl = fl;
continue;
}
+
/*
* No exclusive leases if someone else has a lease on
* this file:
@@ -1642,9 +1680,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
goto out;
}
- if (my_before != NULL) {
- lease = *my_before;
- error = lease->fl_lmops->lm_change(my_before, arg, &dispose);
+ if (my_fl != NULL) {
+ error = lease->fl_lmops->lm_change(my_fl, arg, &dispose);
if (error)
goto out;
goto out_setup;
@@ -1654,7 +1691,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
if (!leases_enable)
goto out;
- locks_insert_lock(before, lease);
+ locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease);
/*
* The check in break_lease() is lockless. It's possible for another
* open to race in after we did the earlier check for a conflicting
@@ -1666,45 +1703,49 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
*/
smp_mb();
error = check_conflicting_open(dentry, arg);
- if (error)
- goto out_unlink;
+ if (error) {
+ locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt);
+ goto out;
+ }
out_setup:
if (lease->fl_lmops->lm_setup)
lease->fl_lmops->lm_setup(lease, priv);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
if (is_deleg)
mutex_unlock(&inode->i_mutex);
- if (!error && !my_before)
+ if (!error && !my_fl)
*flp = NULL;
return error;
-out_unlink:
- locks_unlink_lock(before);
- goto out;
}
static int generic_delete_lease(struct file *filp)
{
int error = -EAGAIN;
- struct file_lock *fl, **before;
+ struct file_lock *fl, *victim = NULL;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
+ struct file_lock_context *ctx = inode->i_flctx;
LIST_HEAD(dispose);
- spin_lock(&inode->i_lock);
- time_out_leases(inode, &dispose);
- for (before = &inode->i_flock;
- ((fl = *before) != NULL) && IS_LEASE(fl);
- before = &fl->fl_next) {
- if (fl->fl_file == filp)
+ if (!ctx) {
+ trace_generic_delete_lease(inode, NULL);
+ return error;
+ }
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_file == filp) {
+ victim = fl;
break;
+ }
}
trace_generic_delete_lease(inode, fl);
- if (fl)
- error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
- spin_unlock(&inode->i_lock);
+ if (victim)
+ error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
+ spin_unlock(&ctx->flc_lock);
locks_dispose_list(&dispose);
return error;
}
@@ -2171,7 +2212,7 @@ again:
*/
/*
* we need that spin_lock here - it prevents reordering between
- * update of inode->i_flock and check for it done in close().
+ * update of i_flctx->flc_posix and check for it done in close().
* rcu_read_lock() wouldn't do.
*/
spin_lock(&current->files->file_lock);
@@ -2331,13 +2372,14 @@ out:
void locks_remove_posix(struct file *filp, fl_owner_t owner)
{
struct file_lock lock;
+ struct file_lock_context *ctx = file_inode(filp)->i_flctx;
/*
* If there are no locks held on this file, we don't need to call
* posix_lock_file(). Another process could be setting a lock on this
* file at the same time, but we wouldn't remove that lock anyway.
*/
- if (!file_inode(filp)->i_flock)
+ if (!ctx || list_empty(&ctx->flc_posix))
return;
lock.fl_type = F_UNLCK;
@@ -2358,67 +2400,67 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
EXPORT_SYMBOL(locks_remove_posix);
+/* The i_flctx must be valid when calling into here */
+static void
+locks_remove_flock(struct file *filp)
+{
+ struct file_lock fl = {
+ .fl_owner = filp,
+ .fl_pid = current->tgid,
+ .fl_file = filp,
+ .fl_flags = FL_FLOCK,
+ .fl_type = F_UNLCK,
+ .fl_end = OFFSET_MAX,
+ };
+ struct file_lock_context *flctx = file_inode(filp)->i_flctx;
+
+ if (list_empty(&flctx->flc_flock))
+ return;
+
+ if (filp->f_op->flock)
+ filp->f_op->flock(filp, F_SETLKW, &fl);
+ else
+ flock_lock_file(filp, &fl);
+
+ if (fl.fl_ops && fl.fl_ops->fl_release_private)
+ fl.fl_ops->fl_release_private(&fl);
+}
+
+/* The i_flctx must be valid when calling into here */
+static void
+locks_remove_lease(struct file *filp)
+{
+ struct inode *inode = file_inode(filp);
+ struct file_lock_context *ctx = inode->i_flctx;
+ struct file_lock *fl, *tmp;
+ LIST_HEAD(dispose);
+
+ if (list_empty(&ctx->flc_lease))
+ return;
+
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
+ lease_modify(fl, F_UNLCK, &dispose);
+ spin_unlock(&ctx->flc_lock);
+ locks_dispose_list(&dispose);
+}
+
/*
* This function is called on the last close of an open file.
*/
void locks_remove_file(struct file *filp)
{
- struct inode * inode = file_inode(filp);
- struct file_lock *fl;
- struct file_lock **before;
- LIST_HEAD(dispose);
-
- if (!inode->i_flock)
+ if (!file_inode(filp)->i_flctx)
return;
+ /* remove any OFD locks */
locks_remove_posix(filp, filp);
- if (filp->f_op->flock) {
- struct file_lock fl = {
- .fl_owner = filp,
- .fl_pid = current->tgid,
- .fl_file = filp,
- .fl_flags = FL_FLOCK,
- .fl_type = F_UNLCK,
- .fl_end = OFFSET_MAX,
- };
- filp->f_op->flock(filp, F_SETLKW, &fl);
- if (fl.fl_ops && fl.fl_ops->fl_release_private)
- fl.fl_ops->fl_release_private(&fl);
- }
-
- spin_lock(&inode->i_lock);
- before = &inode->i_flock;
-
- while ((fl = *before) != NULL) {
- if (fl->fl_file == filp) {
- if (IS_LEASE(fl)) {
- lease_modify(before, F_UNLCK, &dispose);
- continue;
- }
-
- /*
- * There's a leftover lock on the list of a type that
- * we didn't expect to see. Most likely a classic
- * POSIX lock that ended up not getting released
- * properly, or that raced onto the list somehow. Log
- * some info about it and then just remove it from
- * the list.
- */
- WARN(!IS_FLOCK(fl),
- "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n",
- MAJOR(inode->i_sb->s_dev),
- MINOR(inode->i_sb->s_dev), inode->i_ino,
- fl->fl_type, fl->fl_flags,
- fl->fl_start, fl->fl_end);
+ /* remove flock locks */
+ locks_remove_flock(filp);
- locks_delete_lock(before, &dispose);
- continue;
- }
- before = &fl->fl_next;
- }
- spin_unlock(&inode->i_lock);
- locks_dispose_list(&dispose);
+ /* remove any leases */
+ locks_remove_lease(filp);
}
/**
@@ -2621,6 +2663,9 @@ static int __init filelock_init(void)
{
int i;
+ flctx_cache = kmem_cache_create("file_lock_ctx",
+ sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
+
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
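
locks_get_lock_context() allocates outside any lock and publishes the pointer under i_lock, freeing its copy if another task won the race. A user-space sketch of the same publish-or-discard pattern (illustration only; a strictly conforming C11 version would make the shared pointer _Atomic; compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { pthread_mutex_t lock; };

static pthread_mutex_t publish_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ctx *shared_ctx;		/* like inode->i_flctx */

static struct ctx *get_ctx(void)
{
	struct ctx *new;

	if (shared_ctx)			/* fast path, no locking */
		return shared_ctx;

	new = malloc(sizeof(*new));	/* allocate before taking the lock */
	if (!new)
		return NULL;
	pthread_mutex_init(&new->lock, NULL);

	pthread_mutex_lock(&publish_lock);
	if (!shared_ctx) {		/* we won the race: publish ours */
		shared_ctx = new;
		new = NULL;
	}
	pthread_mutex_unlock(&publish_lock);

	free(new);			/* lost the race: discard our copy */
	return shared_ctx;
}

int main(void)
{
	printf("ctx=%p\n", (void *)get_ctx());
	printf("ctx=%p (same)\n", (void *)get_ctx());
	return 0;
}
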
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 7f3f60641344..8cdb2b28a104 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -85,25 +85,30 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
{
struct inode *inode = state->inode;
struct file_lock *fl;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct list_head *list;
int status = 0;
- if (inode->i_flock == NULL)
+ if (flctx == NULL)
goto out;
- /* Protect inode->i_flock using the i_lock */
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
- continue;
+ list = &flctx->flc_posix;
+ spin_lock(&flctx->flc_lock);
+restart:
+ list_for_each_entry(fl, list, fl_list) {
if (nfs_file_open_context(fl->fl_file) != ctx)
continue;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
status = nfs4_lock_delegation_recall(fl, state, stateid);
if (status < 0)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
+ if (list == &flctx->flc_posix) {
+ list = &flctx->flc_flock;
+ goto restart;
+ }
+ spin_unlock(&flctx->flc_lock);
out:
return status;
}
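
The delegation-recall walk now visits two lists with one loop body: it starts on flc_posix and, when that pass completes, swaps in flc_flock and restarts. A minimal sketch of the control flow (illustration only; lock ids are hypothetical):

#include <stdio.h>

struct lk { int id; struct lk *next; };

static struct lk p2 = { 12, NULL }, p1 = { 11, &p2 };	/* posix locks */
static struct lk f1 = { 21, NULL };			/* flock locks */

int main(void)
{
	struct lk *posix = &p1, *flock = &f1;
	struct lk *list = posix, *cur;

restart:
	for (cur = list; cur; cur = cur->next)
		printf("recovering lock %d\n", cur->id);
	if (list == posix) {		/* first pass done; do flock list */
		list = flock;
		goto restart;
	}
	return 0;
}
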
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5194933ed419..a3bb22ab68c5 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1366,49 +1366,55 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
struct nfs_inode *nfsi = NFS_I(inode);
struct file_lock *fl;
int status = 0;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct list_head *list;
- if (inode->i_flock == NULL)
+ if (flctx == NULL)
return 0;
+ list = &flctx->flc_posix;
+
/* Guard against delegation returns and new lock/unlock calls */
down_write(&nfsi->rwsem);
- /* Protect inode->i_flock using the BKL */
- spin_lock(&inode->i_lock);
- for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
- if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
- continue;
+ spin_lock(&flctx->flc_lock);
+restart:
+ list_for_each_entry(fl, list, fl_list) {
if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&flctx->flc_lock);
status = ops->recover_lock(state, fl);
switch (status) {
- case 0:
- break;
- case -ESTALE:
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_NO_GRACE:
- case -NFS4ERR_STALE_CLIENTID:
- case -NFS4ERR_BADSESSION:
- case -NFS4ERR_BADSLOT:
- case -NFS4ERR_BAD_HIGH_SLOT:
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
- goto out;
- default:
- printk(KERN_ERR "NFS: %s: unhandled error %d\n",
- __func__, status);
- case -ENOMEM:
- case -NFS4ERR_DENIED:
- case -NFS4ERR_RECLAIM_BAD:
- case -NFS4ERR_RECLAIM_CONFLICT:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
- status = 0;
+ case 0:
+ break;
+ case -ESTALE:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ goto out;
+ default:
+ pr_err("NFS: %s: unhandled error %d\n",
+ __func__, status);
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
+ case -NFS4ERR_RECLAIM_BAD:
+ case -NFS4ERR_RECLAIM_CONFLICT:
+ /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ status = 0;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
+ if (list == &flctx->flc_posix) {
+ list = &flctx->flc_flock;
+ goto restart;
+ }
+ spin_unlock(&flctx->flc_lock);
out:
up_write(&nfsi->rwsem);
return status;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 2b5e769beb16..29c7f33c9cf1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -826,11 +826,15 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
struct nfs_pageio_descriptor *pgio)
{
size_t size;
+ struct file_lock_context *flctx;
if (prev) {
if (!nfs_match_open_context(req->wb_context, prev->wb_context))
return false;
- if (req->wb_context->dentry->d_inode->i_flock != NULL &&
+ flctx = req->wb_context->dentry->d_inode->i_flctx;
+ if (flctx != NULL &&
+ !(list_empty_careful(&flctx->flc_posix) &&
+ list_empty_careful(&flctx->flc_flock)) &&
!nfs_match_lock_context(req->wb_lock_context,
prev->wb_lock_context))
return false;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af3af685a9e3..4ae66f416eb9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1091,6 +1091,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
{
struct nfs_open_context *ctx = nfs_file_open_context(file);
struct nfs_lock_context *l_ctx;
+ struct file_lock_context *flctx = file_inode(file)->i_flctx;
struct nfs_page *req;
int do_flush, status;
/*
@@ -1109,7 +1110,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
do_flush = req->wb_page != page || req->wb_context != ctx;
/* for now, flush if more than 1 request in page_group */
do_flush |= req->wb_this_page != req;
- if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
+ if (l_ctx && flctx &&
+ !(list_empty_careful(&flctx->flc_posix) &&
+ list_empty_careful(&flctx->flc_flock))) {
do_flush |= l_ctx->lockowner.l_owner != current->files
|| l_ctx->lockowner.l_pid != current->tgid;
}
@@ -1170,6 +1173,13 @@ out:
return PageUptodate(page) != 0;
}
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+ return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+ fl->fl_type == F_WRLCK;
+}
+
/* If we know the page is up to date, and we're not using byte range locks (or
* if we have the whole file locked for writing), it may be more efficient to
* extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1190,36 @@ out:
*/
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
+ int ret;
+ struct file_lock_context *flctx = inode->i_flctx;
+ struct file_lock *fl;
+
if (file->f_flags & O_DSYNC)
return 0;
if (!nfs_write_pageuptodate(page, inode))
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
- if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
- inode->i_flock->fl_end == OFFSET_MAX &&
- inode->i_flock->fl_type != F_RDLCK))
- return 1;
- return 0;
+ if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+ list_empty_careful(&flctx->flc_posix)))
+ return 0;
+
+ /* Check to see if there are whole file write locks */
+ ret = 0;
+ spin_lock(&flctx->flc_lock);
+ if (!list_empty(&flctx->flc_posix)) {
+ fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+ fl_list);
+ if (is_whole_file_wrlock(fl))
+ ret = 1;
+ } else if (!list_empty(&flctx->flc_flock)) {
+ fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+ fl_list);
+ if (fl->fl_type == F_WRLCK)
+ ret = 1;
+ }
+ spin_unlock(&flctx->flc_lock);
+ return ret;
}
/*
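
The whole-file write lock nfs_can_extend_write() looks for (fl_start == 0, fl_end == OFFSET_MAX, F_WRLCK) corresponds to an fcntl lock taken with l_start == 0 and l_len == 0. A minimal sketch (illustration only):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,	/* write lock */
		.l_whence = SEEK_SET,
		.l_start  = 0,		/* from the first byte... */
		.l_len    = 0,		/* ...to the end of the file */
	};
	int fd = open("lock.tmp", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || fcntl(fd, F_SETLK, &fl) < 0) {
		perror("whole-file write lock");
		return 1;
	}
	printf("holding a whole-file write lock\n");
	close(fd);			/* releases the lock */
	unlink("lock.tmp");
	return 0;
}
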
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1f4b85b15125..370a53a5da13 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3472,7 +3472,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
}
static int
-nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose)
+nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
+ struct list_head *dispose)
{
if (arg & F_UNLCK)
return lease_modify(onlist, arg, dispose);
@@ -5551,10 +5552,11 @@ out_nfserr:
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
- struct file_lock **flpp;
+ struct file_lock *fl;
int status = false;
struct file *filp = find_any_file(fp);
struct inode *inode;
+ struct file_lock_context *flctx;
if (!filp) {
/* Any valid lock stateid should have some sort of access */
@@ -5563,15 +5565,18 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
}
inode = file_inode(filp);
+ flctx = inode->i_flctx;
- spin_lock(&inode->i_lock);
- for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
- if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
- status = true;
- break;
+ if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+ spin_lock(&flctx->flc_lock);
+ list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+ if (fl->fl_owner == (fl_owner_t)lowner) {
+ status = true;
+ break;
+ }
}
+ spin_unlock(&flctx->flc_lock);
}
- spin_unlock(&inode->i_lock);
fput(filp);
return status;
}
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index c991616acca9..bff8567aa42d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
struct fsnotify_event *kevent;
char __user *start;
int ret;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
start = buf;
group = file->private_data;
pr_debug("%s: group=%p\n", __func__, group);
+ add_wait_queue(&group->notification_waitq, &wait);
while (1) {
- prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
if (start != buf)
break;
- schedule();
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
continue;
}
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
buf += ret;
count -= ret;
}
+ remove_wait_queue(&group->notification_waitq, &wait);
- finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
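The fanotify conversion above is an instance of the woken-wake pattern:
the waiter stays on the queue for the whole loop, and wait_woken()
consumes a pending wakeup flag instead of racing a fresh prepare_to_wait()
against the event check. A condensed sketch of the idiom, where wq and
have_event() are placeholders rather than symbols from this patch:

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&wq, &wait);
	while (!have_event()) {
		if (signal_pending(current))
			break;
		/* sleeps unless a wakeup already set WQ_FLAG_WOKEN */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&wq, &wait);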
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 79b5af5e6a7b..cecd875653e4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2023,11 +2023,8 @@ leave:
dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
- if (ret < 0) {
+ if (ret < 0)
mlog_errno(ret);
- if (newlock)
- dlm_lock_put(newlock);
- }
return ret;
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index b931e04e3388..914c121ec890 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
struct inode *inode,
const char *symname);
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+ struct buffer_head **bh1,
+ struct inode *inode1,
+ struct buffer_head **bh2,
+ struct inode *inode2,
+ int rename);
+
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
/* An orphan dir name is an 8 byte value, printed as a hex string */
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
@@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry,
{
handle_t *handle;
struct inode *inode = old_dentry->d_inode;
+ struct inode *old_dir = old_dentry->d_parent->d_inode;
int err;
struct buffer_head *fe_bh = NULL;
+ struct buffer_head *old_dir_bh = NULL;
struct buffer_head *parent_fe_bh = NULL;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry,
dquot_initialize(dir);
- err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
+ err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+ &parent_fe_bh, dir, 0);
if (err < 0) {
if (err != -ENOENT)
mlog_errno(err);
return err;
}
+ /* make sure both dirs have bhs
+ * get an extra ref on old_dir_bh if old==new */
+ if (!parent_fe_bh) {
+ if (old_dir_bh) {
+ parent_fe_bh = old_dir_bh;
+ get_bh(parent_fe_bh);
+ } else {
+ mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
+ err = -EIO;
+ goto out;
+ }
+ }
+
if (!dir->i_nlink) {
err = -ENOENT;
goto out;
}
- err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+ err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
old_dentry->d_name.len, &old_de_ino);
if (err) {
err = -ENOENT;
@@ -801,10 +825,11 @@ out_unlock_inode:
ocfs2_inode_unlock(inode, 1);
out:
- ocfs2_inode_unlock(dir, 1);
+ ocfs2_double_unlock(old_dir, dir);
brelse(fe_bh);
brelse(parent_fe_bh);
+ brelse(old_dir_bh);
ocfs2_free_dir_lookup_result(&lookup);
@@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
}
/*
- * The only place this should be used is rename!
+ * The only places this should be used are rename and link!
* if they have the same id, then the 1st one is the only one locked.
*/
static int ocfs2_double_lock(struct ocfs2_super *osb,
struct buffer_head **bh1,
struct inode *inode1,
struct buffer_head **bh2,
- struct inode *inode2)
+ struct inode *inode2,
+ int rename)
{
int status;
int inode1_is_ancestor, inode2_is_ancestor;
@@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
}
/* lock id2 */
status = ocfs2_inode_lock_nested(inode2, bh2, 1,
- OI_LS_RENAME1);
+ rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
@@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
}
/* lock id1 */
- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
+ status = ocfs2_inode_lock_nested(inode1, bh1, 1,
+ rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT);
if (status < 0) {
/*
* An error return must mean that no cluster locks
@@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir,
/* if old and new are the same, this'll just do one lock. */
status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
- &new_dir_bh, new_dir);
+ &new_dir_bh, new_dir, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
diff --git a/fs/read_write.c b/fs/read_write.c
index c0805c93b6fa..4060691e78f7 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -358,7 +358,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
return retval;
}
- if (unlikely(inode->i_flock && mandatory_lock(inode))) {
+ if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
retval = locks_mandatory_area(
read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
inode, file, pos, count);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 3ca9b751f122..b95dc32a6e6b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,8 +196,8 @@ struct acpi_processor_flags {
struct acpi_processor {
acpi_handle handle;
u32 acpi_id;
- u32 apic_id;
- u32 id;
+ u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */
+ u32 id; /* CPU logical ID allocated by OS */
u32 pblk;
int performance_platform_limit;
int throttling_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
#endif /* CONFIG_CPU_FREQ */
/* in processor_core.c */
-int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int apic_id, u32 acpi_id);
+int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int phys_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
/* in processor_pdc.c */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 08848050922e..db284bff29dc 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
- tlb->start = TASK_SIZE;
- tlb->end = 0;
+ if (tlb->fullmm) {
+ tlb->start = tlb->end = ~0;
+ } else {
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+ }
}
/*
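The fullmm special case matters because the flush path treats an empty
range as "nothing to do". A sketch of that early-out, modelled on the
tlb_flush_mmu_tlbonly()-style check (illustrative, not part of this hunk):

	static void flush_if_needed(struct mmu_gather *tlb)
	{
		if (!tlb->end)		/* start = TASK_SIZE, end = 0 */
			return;		/* would wrongly skip a fullmm flush */
		tlb_flush(tlb);
		__tlb_reset_range(tlb);	/* fullmm now keeps end = ~0 */
	}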
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 856d381b1d5b..d459cd17b477 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
-int acpi_unmap_lsapic(int cpu);
+int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..5735e7130d63 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
unsigned long flags; /* BLK_MQ_F_* flags */
struct request_queue *queue;
- unsigned int queue_num;
struct blk_flush_queue *fq;
void *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
unsigned int numa_node;
- unsigned int cmd_size; /* per-request extra data */
+ unsigned int queue_num;
atomic_t nr_active;
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);
void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
/*
* Driver command data is immediately after the request. So subtract request
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 445d59231bc4..c294e3e25e37 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
+ __REQ_NO_TIMEOUT, /* requests may never expire */
__REQ_NR_BITS, /* stops here */
};
@@ -243,5 +244,6 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
#endif /* __LINUX_BLK_TYPES_H */
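REQ_NO_TIMEOUT lets a driver park a request indefinitely without the block
timeout handler expiring it; elsewhere in this series nvme applies it to
its async event request. A hedged sketch of a driver opting in, where the
request and helper name are hypothetical:

	static void submit_long_lived_request(struct request *rq)
	{
		rq->cmd_flags |= REQ_NO_TIMEOUT;	/* never expires */
		blk_mq_start_request(rq);
	}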
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 5d86416d35f2..61b19c46bdb3 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
struct ceph_osd_data osd_data;
} extent;
struct {
- __le32 name_len;
- __le32 value_len;
+ u32 name_len;
+ u32 value_len;
__u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
__u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
struct ceph_osd_data osd_data;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a1c81f80978e..33063f872ee3 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
}
}
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
#define READ_ONCE(x) \
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
-#define ASSIGN_ONCE(val, x) \
- ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+ ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
#endif /* __KERNEL__ */
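Note that the rename from ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val) also
swaps the argument order so the destination comes first, like a plain
assignment. A minimal usage sketch with a placeholder shared variable:

	static int flag;

	static void writer(void)
	{
		WRITE_ONCE(flag, 1);	/* destination first, like flag = 1 */
	}

	static int reader(void)
	{
		/* the compiler may not merge, refetch or reorder this read */
		return READ_ONCE(flag);
	}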
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f90c0282c114..ddd2fa7cefd3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
+#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
@@ -625,7 +625,7 @@ struct inode {
atomic_t i_readcount; /* struct files open RO */
#endif
const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
- struct file_lock *i_flock;
+ struct file_lock_context *i_flctx;
struct address_space i_data;
struct list_head i_devices;
union {
@@ -885,6 +885,8 @@ static inline struct file *get_file(struct file *f)
/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
+struct file_lock;
+
struct file_lock_operations {
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
@@ -898,7 +900,7 @@ struct lock_manager_operations {
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, int);
bool (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock **, int, struct list_head *);
+ int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
};
@@ -923,17 +925,17 @@ int locks_in_grace(struct net *);
* FIXME: should we create a separate "struct lock_request" to help distinguish
* these two uses?
*
- * The i_flock list is ordered by:
+ * The various i_flctx lists are ordered by:
*
- * 1) lock type -- FL_LEASEs first, then FL_FLOCK, and finally FL_POSIX
- * 2) lock owner
- * 3) lock range start
- * 4) lock range end
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
*
* Obviously, the last two criteria only matter for POSIX locks.
*/
struct file_lock {
struct file_lock *fl_next; /* singly linked list for this inode */
+ struct list_head fl_list; /* link into file_lock_context */
struct hlist_node fl_link; /* node in global lists */
struct list_head fl_block; /* circular list of blocked processes */
fl_owner_t fl_owner;
@@ -964,6 +966,16 @@ struct file_lock {
} fl_u;
};
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+ int flc_flock_cnt;
+ int flc_posix_cnt;
+ int flc_lease_cnt;
+};
+
/* The following constant reflects the upper bound of the file/locking space */
#ifndef OFFSET_MAX
#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
@@ -990,6 +1002,7 @@ extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
extern int fcntl_getlease(struct file *filp);
/* fs/locks.c */
+void locks_free_lock_context(struct file_lock_context *ctx);
void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
@@ -1010,7 +1023,7 @@ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int t
extern void lease_get_mtime(struct inode *, struct timespec *time);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
-extern int lease_modify(struct file_lock **, int, struct list_head *);
+extern int lease_modify(struct file_lock *, int, struct list_head *);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1047,6 +1060,11 @@ static inline int fcntl_getlease(struct file *filp)
return F_UNLCK;
}
+static inline void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+}
+
static inline void locks_init_lock(struct file_lock *fl)
{
return;
@@ -1137,7 +1155,7 @@ static inline int vfs_setlease(struct file *filp, long arg,
return -EINVAL;
}
-static inline int lease_modify(struct file_lock **before, int arg,
+static inline int lease_modify(struct file_lock *fl, int arg,
struct list_head *dispose)
{
return -EINVAL;
@@ -1959,7 +1977,7 @@ static inline int locks_verify_truncate(struct inode *inode,
struct file *filp,
loff_t size)
{
- if (inode->i_flock && mandatory_lock(inode))
+ if (inode->i_flctx && mandatory_lock(inode))
return locks_mandatory_area(
FLOCK_VERIFY_WRITE, inode, filp,
size < inode->i_size ? size : inode->i_size,
@@ -1973,11 +1991,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
{
/*
* Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking inode->i_flock. Otherwise, we could
- * end up racing with tasks trying to set a new lease on this file.
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
*/
smp_mb();
- if (inode->i_flock)
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode, mode, FL_LEASE);
return 0;
}
@@ -1986,11 +2005,12 @@ static inline int break_deleg(struct inode *inode, unsigned int mode)
{
/*
* Since this check is lockless, we must ensure that any refcounts
- * taken are done before checking inode->i_flock. Otherwise, we could
- * end up racing with tasks trying to set a new lease on this file.
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
*/
smp_mb();
- if (inode->i_flock)
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
return __break_lease(inode, mode, FL_DELEG);
return 0;
}
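With i_flctx in place, callers walk a per-type list under flc_lock instead
of chasing the old singly linked i_flock chain. A sketch of the new
traversal idiom, mirroring the nfsd conversion above; count_posix_locks()
is a hypothetical helper, not part of this patch:

	static int count_posix_locks(struct inode *inode)
	{
		struct file_lock_context *ctx = inode->i_flctx;
		struct file_lock *fl;
		int n = 0;

		if (!ctx || list_empty_careful(&ctx->flc_posix))
			return 0;

		spin_lock(&ctx->flc_lock);
		list_for_each_entry(fl, &ctx->flc_posix, fl_list)
			n++;
		spin_unlock(&ctx->flc_lock);
		return n;
	}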
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 290db1269c4c..75ae2e2631fc 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,11 +13,54 @@
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
+/* Shifted versions of the command enable bits are used if the command
+ * has no arguments (see kdb_check_flags). This allows a command, such as
+ * go, to have different permissions depending upon whether it is called
+ * with an argument.
+ */
+#define KDB_ENABLE_NO_ARGS_SHIFT 10
+
typedef enum {
- KDB_REPEAT_NONE = 0, /* Do not repeat this command */
- KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
- KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
-} kdb_repeat_t;
+ KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
+ KDB_ENABLE_MEM_READ = (1 << 1),
+ KDB_ENABLE_MEM_WRITE = (1 << 2),
+ KDB_ENABLE_REG_READ = (1 << 3),
+ KDB_ENABLE_REG_WRITE = (1 << 4),
+ KDB_ENABLE_INSPECT = (1 << 5),
+ KDB_ENABLE_FLOW_CTRL = (1 << 6),
+ KDB_ENABLE_SIGNAL = (1 << 7),
+ KDB_ENABLE_REBOOT = (1 << 8),
+ /* User-exposed values stop here; all remaining flags are
+ * exclusively used to describe a command's behaviour.
+ */
+
+ KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
+ KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
+
+ KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
+
+ KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
+ KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
+} kdb_cmdflags_t;
typedef int (*kdb_func_t)(int, const char **);
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
#define KDB_BADLENGTH (-19)
#define KDB_NOBP (-20)
#define KDB_BADADDR (-21)
+#define KDB_NOPERM (-22)
/*
* kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
/* Dynamic kdb shell command registration */
extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
- short, kdb_repeat_t);
+extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+ short, kdb_cmdflags_t);
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
-static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen,
- kdb_repeat_t repeat) { return 0; }
+static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
+ char *help, short minlen,
+ kdb_cmdflags_t flags) { return 0; }
static inline int kdb_unregister(char *cmd) { return 0; }
#endif /* CONFIG_KGDB_KDB */
enum {
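A worked example of the enable-bit scheme: the console's permission mask
is widened with KDB_ENABLE_ALWAYS_SAFE and, for a command run without
arguments, with the same bits shifted left by KDB_ENABLE_NO_ARGS_SHIFT.
A standalone sketch that mirrors the kdb_check_flags() logic added later
in this series (constants reproduced by value, purely illustrative):

	#include <stdbool.h>

	#define NO_ARGS_SHIFT	10	/* KDB_ENABLE_NO_ARGS_SHIFT */

	static bool check_flags(unsigned int flags, unsigned int perms,
				bool no_args)
	{
		perms &= (1u << NO_ARGS_SHIFT) - 1;	/* KDB_ENABLE_MASK */
		perms |= 1u << 9;		/* KDB_ENABLE_ALWAYS_SAFE */
		if (no_args)
			perms |= perms << NO_ARGS_SHIFT;
		flags |= 1u << 0;		/* KDB_ENABLE_ALL */
		return perms & flags;
	}

In the registration table later in this series, "go" carries
KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS: run with no
arguments it matches the shifted ALWAYS_SAFE bit on any console, while
"go <vaddr>" additionally needs REG_WRITE (or a console enabled for
everything).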
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f80d0194c9bc..80fc92a49649 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
- #define expand_upwards(vma, address) do { } while (0)
+ #define expand_upwards(vma, address) (0)
#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 375af80bde7d..f767a0de611f 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -137,6 +137,7 @@ struct sdhci_host {
#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
unsigned int version; /* SDHCI spec. version */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 679e6e90aa4c..52fd8e8694cf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
 * If the device supports VLAN filtering this function is called when a
* VLAN id is registered.
*
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
 * If the device supports VLAN filtering this function is called when a
* VLAN id is unregistered.
*
@@ -2085,7 +2085,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
for_each_netdev_rcu(&init_net, slave) \
- if (netdev_master_upper_dev_get_rcu(slave) == bond)
+ if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
static inline struct net_device *next_net_device(struct net_device *dev)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 486e84ccb1f9..4f7a61ca4b39 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -79,11 +79,6 @@ struct perf_branch_stack {
struct perf_branch_entry entries[0];
};
-struct perf_regs {
- __u64 abi;
- struct pt_regs *regs;
-};
-
struct task_struct;
/*
@@ -610,7 +605,14 @@ struct perf_sample_data {
u32 reserved;
} cpu_entry;
struct perf_callchain_entry *callchain;
+
+ /*
+ * regs_user may point to task_pt_regs or to regs_user_copy, depending
+ * on arch details.
+ */
struct perf_regs regs_user;
+ struct pt_regs regs_user_copy;
+
struct perf_regs regs_intr;
u64 stack_user_size;
} ____cacheline_aligned;
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 3c73d5fe18be..a5f98d53d732 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -1,11 +1,19 @@
#ifndef _LINUX_PERF_REGS_H
#define _LINUX_PERF_REGS_H
+struct perf_regs {
+ __u64 abi;
+ struct pt_regs *regs;
+};
+
#ifdef CONFIG_HAVE_PERF_REGS
#include <asm/perf_regs.h>
u64 perf_reg_value(struct pt_regs *regs, int idx);
int perf_reg_validate(u64 mask);
u64 perf_reg_abi(struct task_struct *task);
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy);
#else
static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
{
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_NONE;
}
+
+static inline void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
#endif /* CONFIG_HAVE_PERF_REGS */
#endif /* _LINUX_PERF_REGS_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c0c2bce6b0b7..d9d7e7e56352 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -37,6 +37,16 @@ struct anon_vma {
atomic_t refcount;
/*
+ * Count of child anon_vmas and VMAs which point to this anon_vma.
+ *
+ * This counter is used for making decisions about reusing an anon_vma
+ * instead of forking a new one. See the comments in anon_vma_clone().
+ */
+ unsigned degree;
+
+ struct anon_vma *parent; /* Parent of this anon_vma */
+
+ /*
* NOTE: the LSB of the rb_root.rb_node is set by
* mm_take_all_locks() _after_ taking the above lock. So the
* rb_root must only be read/written after taking the above lock
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a219be961c0a..00048339c23e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 58d719ddaa60..29c7be8808d5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
- * particular key. Setting this flag does not necessarily mean that SKBs
- * will have sufficient tailroom for ICV or MIC.
+ * particular key.
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
* if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
- * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
- * not necessarily mean that SKBs will have sufficient tailroom for ICV or
- * MIC.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
* @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
* management frames. The flag can help drivers that have a hardware
* crypto implementation that doesn't deal with management frames
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 430cfaf92285..db81c65b8f48 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
-int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
index 3247d7530107..186f7a923570 100644
--- a/include/target/target_core_backend_configfs.h
+++ b/include/target/target_core_backend_configfs.h
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
- DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
- TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 397fb635766a..4a8795a87b9e 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -77,8 +77,6 @@
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Default max_write_same_len, disabled by default */
#define DA_MAX_WRITE_SAME_LEN 0
-/* Default max transfer length */
-#define DA_FABRIC_MAX_SECTORS 8192
/* Use a model alias based on the configfs backend device name */
#define DA_EMULATE_MODEL_ALIAS 0
/* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
- u32 fabric_max_sectors;
u32 optimal_sectors;
u32 hw_queue_depth;
u32 queue_depth;
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 7543b3e51331..e063effe0cc1 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -5,7 +5,7 @@
/*
* FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x1000000
+ * FMODE_NONOTIFY is 0x4000000
* These cannot be used by userspace O_* until internal and external open
* flags are split.
* -Eric Paris
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 7acef41fc209..af94f31e33ac 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args {
uint32_t pad;
};
-#define KFD_IOC_MAGIC 'K'
+#define AMDKFD_IOCTL_BASE 'K'
+#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
+#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
-#define KFD_IOC_GET_VERSION \
- _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+#define AMDKFD_IOC_GET_VERSION \
+ AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
-#define KFD_IOC_CREATE_QUEUE \
- _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+#define AMDKFD_IOC_CREATE_QUEUE \
+ AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
-#define KFD_IOC_DESTROY_QUEUE \
- _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+#define AMDKFD_IOC_DESTROY_QUEUE \
+ AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
-#define KFD_IOC_SET_MEMORY_POLICY \
- _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+#define AMDKFD_IOC_SET_MEMORY_POLICY \
+ AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
-#define KFD_IOC_GET_CLOCK_COUNTERS \
- _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
+ AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
-#define KFD_IOC_GET_PROCESS_APERTURES \
- _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+#define AMDKFD_IOC_GET_PROCESS_APERTURES \
+ AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
-#define KFD_IOC_UPDATE_QUEUE \
- _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+#define AMDKFD_IOC_UPDATE_QUEUE \
+ AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
+
+#define AMDKFD_COMMAND_START 0x01
+#define AMDKFD_COMMAND_END 0x08
#endif
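The renamed macros keep the same 'K' magic but make the command numbers
explicit, and AMDKFD_COMMAND_START/END give userspace a checkable range.
A hedged userspace sketch issuing the version query (the /dev/kfd node
and error handling are illustrative):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	int main(void)
	{
		struct kfd_ioctl_get_version_args args = {0};
		int fd = open("/dev/kfd", O_RDWR);

		if (fd < 0 || ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) < 0) {
			perror("AMDKFD_IOC_GET_VERSION");
			return 1;
		}
		printf("KFD ioctl interface %u.%u\n",
		       args.major_version, args.minor_version);
		close(fd);
		return 0;
	}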
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 3a6dcaa359b7..f714e8633352 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_*
attributes. */
+ OVS_PACKET_ATTR_UNUSED1,
+ OVS_PACKET_ATTR_UNUSED2,
+ OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
+ error logging should be suppressed. */
__OVS_PACKET_ATTR_MAX
};
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644
index 000000000000..b47d9d06fade
--- /dev/null
+++ b/include/xen/interface/nmi.h
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+#include <xen/interface/xen.h>
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error 0
+#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr 1
+#define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown 2
+#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback 0
+struct xennmi_callback {
+ unsigned long handler_address;
+ unsigned long pad;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
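The header mirrors Xen's public ABI. A sketch of how dom0 would register
a callback, assuming the HYPERVISOR_nmi_op() hypercall wrapper that the
x86 Xen code provides; nmi_entry is a hypothetical assembly entry point:

	static struct xennmi_callback cb = {
		.handler_address = (unsigned long)nmi_entry,
	};

	static int register_xen_nmi(void)
	{
		int ret = HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);

		if (ret)
			pr_err("XENNMI_register_callback failed: %d\n", ret);
		return ret;
	}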
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1adf62b39b96..07ce18ca71e0 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -27,6 +27,9 @@
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
+
+#define pr_fmt(fmt) "KGDB: " fmt
+
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
return err;
err = kgdb_arch_remove_breakpoint(&tmp);
if (err)
- printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
- "memory destroyed at: %lx", addr);
+ pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
+ addr);
return err;
}
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
if (error) {
ret = error;
- printk(KERN_INFO "KGDB: BP install failed: %lx",
- kgdb_break[i].bpt_addr);
+ pr_info("BP install failed: %lx\n",
+ kgdb_break[i].bpt_addr);
continue;
}
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
continue;
error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error) {
- printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
- kgdb_break[i].bpt_addr);
+ pr_info("BP remove failed: %lx\n",
+ kgdb_break[i].bpt_addr);
ret = error;
}
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
goto setundefined;
error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error)
- printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+ pr_err("breakpoint remove failed: %lx\n",
kgdb_break[i].bpt_addr);
setundefined:
kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
if (print_wait) {
#ifdef CONFIG_KGDB_KDB
if (!dbg_kdb_mode)
- printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+ pr_crit("waiting... or $3#33 for KDB\n");
#else
- printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+ pr_crit("Waiting for remote debugger\n");
#endif
}
return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
exception_level = 0;
kgdb_skipexception(ks->ex_vector, ks->linux_regs);
dbg_activate_sw_breakpoints();
- printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
- addr);
+ pr_crit("re-enter error: breakpoint removed %lx\n", addr);
WARN_ON_ONCE(1);
return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
panic("Recursive entry to debugger");
}
- printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+ pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
/* Allow kdb to debug itself one level */
return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
int cpu;
int trace_on = 0;
int online_cpus = num_online_cpus();
+ u64 time_left;
kgdb_info[ks->cpu].enter_kgdb++;
kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
/*
* Wait for the other CPUs to be notified and be waiting for us:
*/
- while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
- atomic_read(&slaves_in_kgdb)) != online_cpus)
+ time_left = loops_per_jiffy * HZ;
+ while (kgdb_do_roundup && --time_left &&
+ (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+ online_cpus)
cpu_relax();
+ if (!time_left)
+ pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
/*
* At this point the primary processor is completely
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
static void sysrq_handle_dbg(int key)
{
if (!dbg_io_ops) {
- printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+ pr_crit("ERROR: No KGDB I/O module available\n");
return;
}
if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
if (!dbg_kdb_mode)
- printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+ pr_crit("KGDB or $3#33 for KDB\n");
#else
- printk(KERN_CRIT "Entering KGDB\n");
+ pr_crit("Entering KGDB\n");
#endif
}
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
{
kgdb_break_asap = 0;
- printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+ pr_crit("Waiting for connection from remote gdb...\n");
kgdb_breakpoint();
}
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
if (dbg_io_ops) {
spin_unlock(&kgdb_registration_lock);
- printk(KERN_ERR "kgdb: Another I/O driver is already "
- "registered with KGDB.\n");
+ pr_err("Another I/O driver is already registered with KGDB\n");
return -EBUSY;
}
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
spin_unlock(&kgdb_registration_lock);
- printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
- new_dbg_io_ops->name);
+ pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
/* Arm KGDB now. */
kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
spin_unlock(&kgdb_registration_lock);
- printk(KERN_INFO
- "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+ pr_info("Unregistered I/O driver %s, debugger disabled\n",
old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index b20d544f20c2..e1dbf4a2c69e 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
bp->bp_free = 1;
- kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
- "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
- "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
+ "Set/Display breakpoints", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
+ "Display breakpoints", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
- kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
- "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("bc", kdb_bc, "<bpnum>",
- "Clear Breakpoint", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("be", kdb_bc, "<bpnum>",
- "Enable Breakpoint", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bd", kdb_bc, "<bpnum>",
- "Disable Breakpoint", 0, KDB_REPEAT_NONE);
-
- kdb_register_repeat("ss", kdb_ss, "",
- "Single Step", 1, KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
+ "[datar [length]|dataw [length]] Set hw brk", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bc", kdb_bc, "<bpnum>",
+ "Clear Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+ kdb_register_flags("be", kdb_bc, "<bpnum>",
+ "Enable Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+ kdb_register_flags("bd", kdb_bc, "<bpnum>",
+ "Disable Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+
+ kdb_register_flags("ss", kdb_ss, "",
+ "Single Step", 1,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
/*
* Architecture dependent initialization.
*/
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8859ca34dcfe..15e1a7af5dd0 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
ks->pass_exception = 1;
KDB_FLAG_SET(CATASTROPHIC);
}
+ /* set CATASTROPHIC if the system contains unresponsive processors */
+ for_each_online_cpu(i)
+ if (!kgdb_info[i].enter_kgdb)
+ KDB_FLAG_SET(CATASTROPHIC);
if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
KDB_STATE_CLEAR(SSBPT);
KDB_STATE_CLEAR(DOING_SS);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 379650b984f8..f191bddf64b8 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -12,6 +12,7 @@
*/
#include <linux/ctype.h>
+#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
@@ -42,6 +44,12 @@
#include <linux/slab.h>
#include "kdb_private.h"
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "kdb."
+
+static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
+module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
+
#define GREP_LEN 256
char kdb_grep_string[GREP_LEN];
int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
KDBMSG(BADLENGTH, "Invalid length field"),
KDBMSG(NOBP, "No Breakpoint exists"),
KDBMSG(BADADDR, "Invalid address"),
+ KDBMSG(NOPERM, "Permission denied"),
};
#undef KDBMSG
@@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu)
}
/*
+ * Check whether the flags of the current command and the permissions
+ * of the kdb console allow a command to be run.
+ */
+static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+ bool no_args)
+{
+ /* permissions comes from userspace so needs massaging slightly */
+ permissions &= KDB_ENABLE_MASK;
+ permissions |= KDB_ENABLE_ALWAYS_SAFE;
+
+ /* some commands change group when launched with no arguments */
+ if (no_args)
+ permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
+
+ flags |= KDB_ENABLE_ALL;
+
+ return permissions & flags;
+}
+
+/*
* kdbgetenv - This function will return the character string value of
* an environment variable.
* Parameters:
@@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
kdb_symtab_t symtab;
/*
+ * If the enable flags prohibit both arbitrary memory access
+ * and flow control then there are no reasonable grounds to
+ * provide symbol lookup.
+ */
+ if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
+ kdb_cmd_enabled, false))
+ return KDB_NOPERM;
+
+ /*
* Process arguments which follow the following syntax:
*
* symbol | numeric-address [+/- numeric-offset]
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
if (!s->count)
s->usable = 0;
if (s->usable)
- kdb_register(s->name, kdb_exec_defcmd,
- s->usage, s->help, 0);
+ /* macros are always safe because, when executed, each
+ * internal command re-enters kdb_parse() and is
+ * safety-checked individually.
+ */
+ kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
+ s->help, 0,
+ KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
if (i < kdb_max_commands) {
int result;
+
+ if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
+ return KDB_NOPERM;
+
KDB_STATE_SET(CMD);
result = (*tp->cmd_func)(argc-1, (const char **)argv);
if (result && ignore_errors && result > KDB_CMD_GO)
result = 0;
KDB_STATE_CLEAR(CMD);
- switch (tp->cmd_repeat) {
- case KDB_REPEAT_NONE:
- argc = 0;
- if (argv[0])
- *(argv[0]) = '\0';
- break;
- case KDB_REPEAT_NO_ARGS:
- argc = 1;
- if (argv[1])
- *(argv[1]) = '\0';
- break;
- case KDB_REPEAT_WITH_ARGS:
- break;
- }
+
+ if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
+ return result;
+
+ argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
+ if (argv[argc])
+ *(argv[argc]) = '\0';
return result;
}
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
*/
static int kdb_sr(int argc, const char **argv)
{
+ bool check_mask =
+ !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
+
if (argc != 1)
return KDB_ARGCOUNT;
+
kdb_trap_printk++;
- __handle_sysrq(*argv[1], false);
+ __handle_sysrq(*argv[1], check_mask);
kdb_trap_printk--;
return 0;
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i)) {
state = 'F'; /* cpu is offline */
+ } else if (!kgdb_info[i].enter_kgdb) {
+ state = 'D'; /* cpu is online but unresponsive */
} else {
state = ' '; /* cpu is responding to kdb */
if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
/*
* Validate cpunum
*/
- if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+ if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
return KDB_BADCPUNUM;
dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
return 0;
if (!kt->cmd_name)
continue;
+ if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
+ continue;
if (strlen(kt->cmd_usage) > 20)
space = "\n ";
kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
}
/*
- * kdb_register_repeat - This function is used to register a kernel
+ * kdb_register_flags - This function is used to register a kernel
* debugger command.
* Inputs:
* cmd Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
* zero for success, one if a duplicate command.
*/
#define kdb_command_extend 50 /* arbitrary */
-int kdb_register_repeat(char *cmd,
- kdb_func_t func,
- char *usage,
- char *help,
- short minlen,
- kdb_repeat_t repeat)
+int kdb_register_flags(char *cmd,
+ kdb_func_t func,
+ char *usage,
+ char *help,
+ short minlen,
+ kdb_cmdflags_t flags)
{
int i;
kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
kp->cmd_func = func;
kp->cmd_usage = usage;
kp->cmd_help = help;
- kp->cmd_flags = 0;
kp->cmd_minlen = minlen;
- kp->cmd_repeat = repeat;
+ kp->cmd_flags = flags;
return 0;
}
-EXPORT_SYMBOL_GPL(kdb_register_repeat);
+EXPORT_SYMBOL_GPL(kdb_register_flags);
/*
* kdb_register - Compatibility register function for commands that do
* not need to specify a repeat state. Equivalent to
- * kdb_register_repeat with KDB_REPEAT_NONE.
+ * kdb_register_flags with flags set to 0.
* Inputs:
* cmd Command name
* func Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
char *help,
short minlen)
{
- return kdb_register_repeat(cmd, func, usage, help, minlen,
- KDB_REPEAT_NONE);
+ return kdb_register_flags(cmd, func, usage, help, minlen, 0);
}
EXPORT_SYMBOL_GPL(kdb_register);
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
for_each_kdbcmd(kp, i)
kp->cmd_name = NULL;
- kdb_register_repeat("md", kdb_md, "<vaddr>",
+ kdb_register_flags("md", kdb_md, "<vaddr>",
"Display Memory Contents, also mdWcN, e.g. md8c1", 1,
- KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
- "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
- "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mds", kdb_md, "<vaddr>",
- "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
- "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("go", kdb_go, "[<vaddr>]",
- "Continue Execution", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("rd", kdb_rd, "",
- "Display Registers", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
- "Modify Registers", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("ef", kdb_ef, "<vaddr>",
- "Display exception frame", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
- "Stack traceback", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("btp", kdb_bt, "<pid>",
- "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
- "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("btc", kdb_bt, "",
- "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+ "Display Raw Memory", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+ "Display Physical Memory", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mds", kdb_md, "<vaddr>",
+ "Display Memory Symbolically", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+ "Modify Memory Contents", 0,
+ KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("go", kdb_go, "[<vaddr>]",
+ "Continue Execution", 1,
+ KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+ kdb_register_flags("rd", kdb_rd, "",
+ "Display Registers", 0,
+ KDB_ENABLE_REG_READ);
+ kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+ "Modify Registers", 0,
+ KDB_ENABLE_REG_WRITE);
+ kdb_register_flags("ef", kdb_ef, "<vaddr>",
+ "Display exception frame", 0,
+ KDB_ENABLE_MEM_READ);
+ kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+ "Stack traceback", 1,
+ KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+ kdb_register_flags("btp", kdb_bt, "<pid>",
+ "Display stack for process <pid>", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+ "Backtrace all processes matching state flag", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("btc", kdb_bt, "",
+ "Backtrace current process on each cpu", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("btt", kdb_bt, "<vaddr>",
"Backtrace process given its struct task address", 0,
- KDB_REPEAT_NONE);
- kdb_register_repeat("env", kdb_env, "",
- "Show environment variables", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("set", kdb_set, "",
- "Set environment variables", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("help", kdb_help, "",
- "Display Help Message", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("?", kdb_help, "",
- "Display Help Message", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
- "Switch to new cpu", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("kgdb", kdb_kgdb, "",
- "Enter kgdb mode", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
- "Display active task list", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("pid", kdb_pid, "<pidnum>",
- "Switch to another task", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("reboot", kdb_reboot, "",
- "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+ KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+ kdb_register_flags("env", kdb_env, "",
+ "Show environment variables", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("set", kdb_set, "",
+ "Set environment variables", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("help", kdb_help, "",
+ "Display Help Message", 1,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("?", kdb_help, "",
+ "Display Help Message", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+ "Switch to new cpu", 0,
+ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+ kdb_register_flags("kgdb", kdb_kgdb, "",
+ "Enter kgdb mode", 0, 0);
+ kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+ "Display active task list", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("pid", kdb_pid, "<pidnum>",
+ "Switch to another task", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("reboot", kdb_reboot, "",
+ "Reboot the machine immediately", 0,
+ KDB_ENABLE_REBOOT);
#if defined(CONFIG_MODULES)
- kdb_register_repeat("lsmod", kdb_lsmod, "",
- "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("lsmod", kdb_lsmod, "",
+ "List loaded kernel modules", 0,
+ KDB_ENABLE_INSPECT);
#endif
#if defined(CONFIG_MAGIC_SYSRQ)
- kdb_register_repeat("sr", kdb_sr, "<key>",
- "Magic SysRq key", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("sr", kdb_sr, "<key>",
+ "Magic SysRq key", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
#endif
#if defined(CONFIG_PRINTK)
- kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
- "Display syslog buffer", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
+ "Display syslog buffer", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
#endif
if (arch_kgdb_ops.enable_nmi) {
- kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
- "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
- }
- kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
- "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
- "Send a signal to a process", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("summary", kdb_summary, "",
- "Summarize the system", 4, KDB_REPEAT_NONE);
- kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
- "Display per_cpu variables", 3, KDB_REPEAT_NONE);
- kdb_register_repeat("grephelp", kdb_grep_help, "",
- "Display help on | grep", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
+ "Disable NMI entry to KDB", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ }
+ kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+ "Define a set of commands, down to endefcmd", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
+ "Send a signal to a process", 0,
+ KDB_ENABLE_SIGNAL);
+ kdb_register_flags("summary", kdb_summary, "",
+ "Summarize the system", 4,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
+ "Display per_cpu variables", 3,
+ KDB_ENABLE_MEM_READ);
+ kdb_register_flags("grephelp", kdb_grep_help, "",
+ "Display help on | grep", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
}
/* Execute any commands defined in kdb_cmds. */
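Out-of-tree callers of the old kdb_register_repeat() now pick an enable
class at registration time. A sketch of a module registering a harmless
diagnostic command under the new API; my_cmd is hypothetical:

	static int my_cmd(int argc, const char **argv)
	{
		kdb_printf("hello from kdb\n");
		return 0;
	}

	static int __init my_cmd_init(void)
	{
		return kdb_register_flags("hello", my_cmd, "",
					  "Print a greeting", 0,
					  KDB_ENABLE_ALWAYS_SAFE);
	}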
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 7afd3c8c41d5..eaacd1693954 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -172,10 +172,9 @@ typedef struct _kdbtab {
kdb_func_t cmd_func; /* Function to execute command */
char *cmd_usage; /* Usage String for this command */
char *cmd_help; /* Help message for this command */
- short cmd_flags; /* Parsing flags */
short cmd_minlen; /* Minimum legal # command
* chars required */
- kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */
+ kdb_cmdflags_t cmd_flags; /* Command behaviour flags */
} kdbtab_t;
extern int kdb_bt(int, const char **); /* KDB display back trace */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c1ee7f2bebc..882f835a0d85 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
}
static void perf_sample_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs)
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
{
- if (!user_mode(regs)) {
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
-
- if (regs) {
- regs_user->abi = perf_reg_abi(current);
+ if (user_mode(regs)) {
+ regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
+ } else if (current->mm) {
+ perf_get_regs_user(regs_user, regs, regs_user_copy);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
}
if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
- perf_sample_regs_user(&data->regs_user, regs);
+ perf_sample_regs_user(&data->regs_user, regs,
+ &data->regs_user_copy);
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
diff --git a/kernel/exit.c b/kernel/exit.c
index 1ea4369890a3..6806c55475ee 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
static int wait_consider_task(struct wait_opts *wo, int ptrace,
struct task_struct *p)
{
+ /*
+ * We can race with wait_task_zombie() from another thread.
+ * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+ * can't confuse the checks below.
+ */
+ int exit_state = ACCESS_ONCE(p->exit_state);
int ret;
- if (unlikely(p->exit_state == EXIT_DEAD))
+ if (unlikely(exit_state == EXIT_DEAD))
return 0;
ret = eligible_child(wo, p);
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
return 0;
}
- if (unlikely(p->exit_state == EXIT_TRACE)) {
+ if (unlikely(exit_state == EXIT_TRACE)) {
/*
* ptrace == 0 means we are the natural parent. In this case
* we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
}
/* slay zombie? */
- if (p->exit_state == EXIT_ZOMBIE) {
+ if (exit_state == EXIT_ZOMBIE) {
/* we don't reap group leaders with subthreads */
if (!delay_group_leader(p)) {
/*
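The kernel/exit.c change snapshots p->exit_state into a local through ACCESS_ONCE() so the three state checks that follow all observe one consistent value; otherwise the compiler is free to reload the field between checks while wait_task_zombie() in another thread moves it from EXIT_ZOMBIE to EXIT_DEAD or EXIT_TRACE. A self-contained sketch of the snapshot idiom, using simplified userspace stand-ins:

#include <stdio.h>

/* userspace stand-in for the kernel's ACCESS_ONCE(): force a single
 * read of a field another thread may be changing */
#define ACCESS_ONCE_INT(x) (*(volatile int *)&(x))

enum { EXIT_ZOMBIE = 1, EXIT_DEAD = 2, EXIT_TRACE = 3 };

static int shared_exit_state;	/* written concurrently in the real code */

static int consider(void)
{
	/* snapshot once; every check below sees the same value */
	int exit_state = ACCESS_ONCE_INT(shared_exit_state);

	if (exit_state == EXIT_DEAD)
		return 0;
	if (exit_state == EXIT_TRACE)
		return 1;
	if (exit_state == EXIT_ZOMBIE)
		return 2;
	return -1;
}

int main(void)
{
	shared_exit_state = EXIT_ZOMBIE;
	printf("consider() -> %d\n", consider());
	return 0;
}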
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 5cf6731b98e9..3ef3736002d8 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
DEBUG_LOCKS_WARN_ON(lock->owner != current);
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
- mutex_clear_owner(lock);
}
/*
* __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
* mutexes so that we can do it here after we've verified state.
*/
+ mutex_clear_owner(lock);
atomic_set(&lock->count, 1);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b78add6..c0accc00566e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7113,9 +7113,6 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
- alloc_size += num_possible_cpus() * cpumask_size();
-#endif
if (alloc_size) {
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
+ }
#ifdef CONFIG_CPUMASK_OFFSTACK
- for_each_possible_cpu(i) {
- per_cpu(load_balance_mask, i) = (void *)ptr;
- ptr += cpumask_size();
- }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+ for_each_possible_cpu(i) {
+ per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+ cpumask_size(), GFP_KERNEL, cpu_to_node(i));
}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e5db8c6feebd..b52092f2636d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
- int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
- int rorun = dl_se->runtime <= 0;
-
- if (!rorun && !dmiss)
- return 0;
-
- /*
- * If we are beyond our current deadline and we are still
- * executing, then we have already used some of the runtime of
- * the next instance. Thus, if we do not account that, we are
- * stealing bandwidth from the system at each deadline miss!
- */
- if (dmiss) {
- dl_se->runtime = rorun ? dl_se->runtime : 0;
- dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
- }
-
- return 1;
+ return (dl_se->runtime <= 0);
}
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
* parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime.
*/
- if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
- replenish_dl_entity(dl_se, pi_se);
- else
+ if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
update_dl_entity(dl_se, pi_se);
+ else if (flags & ENQUEUE_REPLENISH)
+ replenish_dl_entity(dl_se, pi_se);
__enqueue_dl_entity(dl_se);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df2cdf77f899..40667cbf371b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
+ /* init_cfs_bandwidth() was not called */
+ if (!cfs_b->throttled_cfs_rq.next)
+ return;
+
hrtimer_cancel(&cfs_b->period_timer);
hrtimer_cancel(&cfs_b->slack_timer);
}
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
* wl = S * s'_i; see (2)
*/
if (W > 0 && w < W)
- wl = (w * tg->shares) / W;
+ wl = (w * (long)tg->shares) / W;
else
wl = tg->shares;
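The one-cast fair.c fix matters because tg->shares is unsigned: when the weight delta w is negative, w * tg->shares promotes w to unsigned long and the division yields a huge bogus value instead of a negative load. A compilable illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	long w = -512;			/* weight deltas can be negative */
	unsigned long shares = 1024;	/* tg->shares is unsigned long */
	long W = 2048;

	/* mixed arithmetic: w converts to unsigned long, result is bogus */
	printf("unsigned: %lu\n", (w * shares) / W);
	/* the fair.c fix: the cast keeps the whole expression signed */
	printf("signed:   %ld\n", (w * (long)shares) / W);
	return 0;
}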
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index b0b1c44e923a..3ccf5c2c1320 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv)
static __init int kdb_ftrace_register(void)
{
- kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
- "Dump ftrace log", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+ "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 358eb81fa28d..c635a107a7de 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -73,6 +73,31 @@ config KGDB_KDB
help
KDB frontend for kernel
+config KDB_DEFAULT_ENABLE
+ hex "KDB: Select kdb command functions to be enabled by default"
+ depends on KGDB_KDB
+ default 0x1
+ help
+	  Specifies which kdb commands are enabled by default. This may
+ be set to 1 or 0 to enable all commands or disable almost all
+ commands.
+
+ Alternatively the following bitmask applies:
+
+ 0x0002 - allow arbitrary reads from memory and symbol lookup
+ 0x0004 - allow arbitrary writes to memory
+ 0x0008 - allow current register state to be inspected
+ 0x0010 - allow current register state to be modified
+ 0x0020 - allow passive inspection (backtrace, process list, lsmod)
+ 0x0040 - allow flow control management (breakpoint, single step)
+ 0x0080 - enable signalling of processes
+ 0x0100 - allow machine to be rebooted
+
+	  The config option merely sets the default at boot time. Either
+	  issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+	  setting the kdb.cmd_enable=X kernel command line option will
+	  override the default settings.
+
config KDB_KEYBOARD
bool "KGDB_KDB: keyboard as input device"
depends on VT && KGDB_KDB
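The bit values in the KDB_DEFAULT_ENABLE help text compose with OR; allowing passive inspection plus reboot and nothing else, for instance, corresponds to kdb.cmd_enable=0x120. A short sketch (the enum names here are illustrative, not the kernel's):

#include <stdio.h>

/* the bitmask values from the Kconfig help text above */
enum {
	KDB_MEM_READ  = 0x0002,
	KDB_MEM_WRITE = 0x0004,
	KDB_REG_READ  = 0x0008,
	KDB_REG_WRITE = 0x0010,
	KDB_INSPECT   = 0x0020,
	KDB_FLOW_CTRL = 0x0040,
	KDB_SIGNAL    = 0x0080,
	KDB_REBOOT    = 0x0100,
};

int main(void)
{
	/* allow passive inspection plus reboot, nothing else */
	unsigned int cmd_enable = KDB_INSPECT | KDB_REBOOT;

	printf("kdb.cmd_enable=0x%x\n", cmd_enable);	/* prints 0x120 */
	return 0;
}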
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 2404d03e251a..03dd576e6773 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -11,6 +11,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
//#define DEBUG
+#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/assoc_array_priv.h>
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 56badfc4810a..957d3da53ddd 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC
depends on !KMEMCHECK
select PAGE_EXTENSION
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
- select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
---help---
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
@@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC
that would result in incorrect warnings of memory corruption after
a resume because free pages are not saved to the suspend image.
-config WANT_PAGE_DEBUG_FLAGS
- bool
-
config PAGE_POISONING
bool
- select WANT_PAGE_DEBUG_FLAGS
-
-config PAGE_GUARD
- bool
- select WANT_PAGE_DEBUG_FLAGS
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ef91e856c7e4..851924fa5170 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
mem_cgroup_swap_statistics(from, false);
mem_cgroup_swap_statistics(to, true);
- /*
- * This function is only called from task migration context now.
- * It postpones page_counter and refcount handling till the end
- * of task migration(mem_cgroup_clear_mc()) for performance
- * improvement. But we cannot postpone css_get(to) because if
- * the process that has been moved to @to does swap-in, the
- * refcount of @to might be decreased to 0.
- *
- * We are in attach() phase, so the cgroup is guaranteed to be
- * alive, so we can just call css_get().
- */
- css_get(&to->css);
return 0;
}
return -EINVAL;
@@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (parent_css == NULL) {
root_mem_cgroup = memcg;
page_counter_init(&memcg->memory, NULL);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
}
@@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
if (parent->use_hierarchy) {
page_counter_init(&memcg->memory, &parent->memory);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, &parent->memsw);
page_counter_init(&memcg->kmem, &parent->kmem);
@@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
*/
} else {
page_counter_init(&memcg->memory, NULL);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
/*
@@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
- memcg->soft_limit = 0;
+ memcg->soft_limit = PAGE_COUNTER_MAX;
}
#ifdef CONFIG_MMU
diff --git a/mm/memory.c b/mm/memory.c
index ca920d1fd314..54f3a9b00956 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
+ if (!tlb->end)
+ return;
+
tlb_flush(tlb);
mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
- for (batch = &tlb->local; batch; batch = batch->next) {
+ for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
void tlb_flush_mmu(struct mmu_gather *tlb)
{
- if (!tlb->end)
- return;
-
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
@@ -2137,17 +2137,24 @@ reuse:
if (!dirty_page)
return ret;
- /*
- * Yes, Virginia, this is actually required to prevent a race
- * with clear_page_dirty_for_io() from clearing the page dirty
- * bit after it clear all dirty ptes, but before a racing
- * do_wp_page installs a dirty pte.
- *
- * do_shared_fault is protected similarly.
- */
if (!page_mkwrite) {
- wait_on_page_locked(dirty_page);
- set_page_dirty_balance(dirty_page);
+ struct address_space *mapping;
+ int dirtied;
+
+ lock_page(dirty_page);
+ dirtied = set_page_dirty(dirty_page);
+ VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
+ mapping = dirty_page->mapping;
+ unlock_page(dirty_page);
+
+ if (dirtied && mapping) {
+ /*
+ * Some device drivers do not set page.mapping
+ * but still dirty their pages
+ */
+ balance_dirty_pages_ratelimited(mapping);
+ }
+
/* file_update_time outside page_lock */
if (vma->vm_file)
file_update_time(vma->vm_file);
@@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
if (prev && prev->vm_end == address)
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
- expand_downwards(vma, address - PAGE_SIZE);
+ return expand_downwards(vma, address - PAGE_SIZE);
}
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
struct vm_area_struct *next = vma->vm_next;
@@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
if (next && next->vm_start == address + PAGE_SIZE)
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
- expand_upwards(vma, address + PAGE_SIZE);
+ return expand_upwards(vma, address + PAGE_SIZE);
}
return 0;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 7b36aa7cc89a..7f684d5a8087 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -778,10 +778,12 @@ again: remove_next = 1 + (end > next->vm_end);
if (exporter && exporter->anon_vma && !importer->anon_vma) {
int error;
+ importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
- if (error)
+ if (error) {
+ importer->anon_vma = NULL;
return error;
- importer->anon_vma = exporter->anon_vma;
+ }
}
}
@@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start;
+ unsigned long new_start, actual_size;
/* address space limit tests */
if (!may_expand_vm(mm, grow))
return -ENOMEM;
/* Stack limit test */
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ actual_size = size;
+ if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+ actual_size -= PAGE_SIZE;
+ if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d5d81f5384d1..6f4335238e33 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1541,16 +1541,6 @@ pause:
bdi_start_background_writeback(bdi);
}
-void set_page_dirty_balance(struct page *page)
-{
- if (set_page_dirty(page)) {
- struct address_space *mapping = page_mapping(page);
-
- if (mapping)
- balance_dirty_pages_ratelimited(mapping);
- }
-}
-
static DEFINE_PER_CPU(int, bdp_ratelimits);
/*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
* page dirty in that case, but not all the buffers. This is a "bottom-up"
* dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
*
- * Most callers have locked the page, which pins the address_space in memory.
- * But zap_pte_range() does not lock the page, however in that case the
- * mapping is pinned by the vma's ->vm_file reference.
- *
- * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() inside tree_lock.
+ * The caller must ensure this doesn't race with truncation. Most will simply
+ * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
+ * the pte lock held, which also locks out truncation.
*/
int __set_page_dirty_nobuffers(struct page *page)
{
if (!TestSetPageDirty(page)) {
struct address_space *mapping = page_mapping(page);
- struct address_space *mapping2;
unsigned long flags;
if (!mapping)
return 1;
spin_lock_irqsave(&mapping->tree_lock, flags);
- mapping2 = page_mapping(page);
- if (mapping2) { /* Race with truncate? */
- BUG_ON(mapping2 != mapping);
- WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
- account_page_dirtied(page, mapping);
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page), PAGECACHE_TAG_DIRTY);
- }
+ BUG_ON(page_mapping(page) != mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+ account_page_dirtied(page, mapping);
+ radix_tree_tag_set(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
if (mapping->host) {
/* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
/*
* We carefully synchronise fault handlers against
* installing a dirty pte and marking the page dirty
- * at this point. We do this by having them hold the
- * page lock at some point after installing their
- * pte, but before marking the page dirty.
- * Pages are always locked coming in here, so we get
- * the desired exclusion. See mm/memory.c:do_wp_page()
- * for more comments.
+ * at this point. We do this by having them hold the
+ * page lock while dirtying the page, and pages are
+ * always locked coming in here, so we get the desired
+ * exclusion.
*/
if (TestClearPageDirty(page)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
diff --git a/mm/rmap.c b/mm/rmap.c
index c5bc241127b2..71cd5bd0c17d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
+ anon_vma->degree = 1; /* Reference for first vma */
+ anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
* from fork, the root will be reset to the parents anon_vma.
@@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
+ /* vma reference or self-parent link for new root */
+ anon_vma->degree++;
allocated = NULL;
avc = NULL;
}
@@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
/*
* Attach the anon_vmas from src to dst.
* Returns 0 on success, -ENOMEM on failure.
+ *
+ * If dst->anon_vma is NULL this function tries to find and reuse an existing
+ * anon_vma which has no vmas and only one child anon_vma. This prevents
+ * degradation of the anon_vma hierarchy into an endless linear chain in the
+ * case of a constantly forking task. On the other hand, an anon_vma with more
+ * than one child is not reused even if there is no alive vma, so the rmap
+ * walker has a good chance of avoiding a scan of the whole hierarchy when it
+ * searches where a page is mapped.
*/
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
@@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
anon_vma = pavc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
anon_vma_chain_link(dst, avc, anon_vma);
+
+ /*
+ * Reuse an existing anon_vma if its degree is lower than two,
+ * which means it has no vma and only one anon_vma child.
+ *
+ * Do not choose the parent anon_vma, otherwise the first child
+ * will always reuse it. The root anon_vma is never reused:
+ * it has a self-parent reference and at least one child.
+ */
+ if (!dst->anon_vma && anon_vma != src->anon_vma &&
+ anon_vma->degree < 2)
+ dst->anon_vma = anon_vma;
}
+ if (dst->anon_vma)
+ dst->anon_vma->degree++;
unlock_anon_vma_root(root);
return 0;
@@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
if (!pvma->anon_vma)
return 0;
+ /* Drop the inherited anon_vma; we'll reuse an existing one or allocate a new one. */
+ vma->anon_vma = NULL;
+
/*
* First, attach the new VMA to the parent VMA's anon_vmas,
* so rmap can find non-COWed pages in child processes.
@@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
if (error)
return error;
+ /* An existing anon_vma has been reused, all done then. */
+ if (vma->anon_vma)
+ return 0;
+
/* Then add our own anon_vma. */
anon_vma = anon_vma_alloc();
if (!anon_vma)
@@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
* lock any of the anon_vmas in this anon_vma tree.
*/
anon_vma->root = pvma->anon_vma->root;
+ anon_vma->parent = pvma->anon_vma;
/*
* With refcounts, an anon_vma can stay around longer than the
* process it belongs to. The root anon_vma needs to be pinned until
@@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
+ anon_vma->parent->degree++;
anon_vma_unlock_write(anon_vma);
return 0;
@@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
* Leave empty anon_vmas on the list - we'll need
* to free them outside the lock.
*/
- if (RB_EMPTY_ROOT(&anon_vma->rb_root))
+ if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+ anon_vma->parent->degree--;
continue;
+ }
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
+ if (vma->anon_vma)
+ vma->anon_vma->degree--;
unlock_anon_vma_root(root);
/*
@@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
+ BUG_ON(anon_vma->degree);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
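The rmap.c additions track a per-anon_vma degree, the count of child anon_vmas plus VMAs using the node, so anon_vma_clone() can hand a forking child an existing node with degree < 2 instead of growing an unbounded chain. A toy model of just the reuse test (structures heavily simplified, not the kernel's):

#include <stdio.h>

/* degree counts child anon_vmas plus VMAs that use the node */
struct anon_vma {
	int degree;
	struct anon_vma *parent;
};

/* mirror of the reuse test in anon_vma_clone(): skip the source's own
 * anon_vma and anything with a vma or more than one child */
static struct anon_vma *pick_reusable(struct anon_vma **cands, int n,
				      struct anon_vma *src_anon_vma)
{
	int i;

	for (i = 0; i < n; i++)
		if (cands[i] != src_anon_vma && cands[i]->degree < 2)
			return cands[i];
	return NULL;	/* caller allocates a fresh anon_vma instead */
}

int main(void)
{
	struct anon_vma root = { 2, &root };	/* self-parent, one child */
	struct anon_vma child = { 1, &root };	/* empty: reusable */
	struct anon_vma *cands[] = { &root, &child };
	struct anon_vma *got = pick_reusable(cands, 2, &root);

	printf("reused: %s\n", got == &child ? "child" : "none");
	return 0;
}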
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd9a72bc4a1b..ab2505c3ef54 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
return false;
/*
- * There is a potential race between when kswapd checks its watermarks
- * and a process gets throttled. There is also a potential race if
- * processes get throttled, kswapd wakes, a large process exits therby
- * balancing the zones that causes kswapd to miss a wakeup. If kswapd
- * is going to sleep, no process should be sleeping on pfmemalloc_wait
- * so wake them now if necessary. If necessary, processes will wake
- * kswapd and get throttled again
+ * The throttled processes are normally woken up in balance_pgdat() as
+ * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+ * race between when kswapd checks the watermarks and a process gets
+ * throttled. There is also a potential race if processes get
+ * throttled, kswapd wakes, a large process exits thereby balancing the
+ * zones, which causes kswapd to exit balance_pgdat() before reaching
+ * the wake up checks. If kswapd is going to sleep, no process should
+ * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+ * the wake up is premature, processes will wake kswapd and get
+ * throttled again. The difference from wake ups in balance_pgdat() is
+ * that here we are under prepare_to_wait().
*/
- if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
- wake_up(&pgdat->pfmemalloc_wait);
- return false;
- }
+ if (waitqueue_active(&pgdat->pfmemalloc_wait))
+ wake_up_all(&pgdat->pfmemalloc_wait);
return pgdat_balanced(pgdat, order, classzone_idx);
}
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index ab6bb2af1d45..b24e4bb64fb5 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
if (orig_initialized)
atomic_dec(&bat_priv->mcast.num_disabled);
orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
- /* If mcast support is being switched off increase the disabled
- * mcast node counter.
+	/* If mcast support is being switched off, or if this is an initial
+	 * OGM without mcast support, then increase the disabled mcast
+	 * node counter.
*/
} else if (!orig_mcast_enabled &&
- orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) {
+ (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+ !orig_initialized)) {
atomic_inc(&bat_priv->mcast.num_disabled);
orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
}
@@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
struct batadv_priv *bat_priv = orig->bat_priv;
- if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST))
+ if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
+ orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
atomic_dec(&bat_priv->mcast.num_disabled);
batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 8d04d174669e..fab47f1f3ef9 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
if (!bat_priv->nc.decoding_hash)
goto err;
- batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
+ batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
&batadv_nc_decoding_hash_lock_class_key);
INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6a484514cd3e..bea8198d0198 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
batadv_frag_purge_orig(orig_node, NULL);
- batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
- "originator timed out");
-
if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
@@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
atomic_set(&orig_node->last_ttvn, 0);
orig_node->tt_buff = NULL;
orig_node->tt_buff_len = 0;
+ orig_node->last_seen = jiffies;
reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
orig_node->bcast_seqno_reset = reset_time;
#ifdef CONFIG_BATMAN_ADV_MCAST
@@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
if (batadv_purge_orig_node(bat_priv, orig_node)) {
batadv_gw_node_delete(bat_priv, orig_node);
hlist_del_rcu(&orig_node->hash_entry);
+ batadv_tt_global_del_orig(orig_node->bat_priv,
+ orig_node, -1,
+ "originator timed out");
batadv_orig_node_free_ref(orig_node);
continue;
}
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 35f76f2f7824..6648f321864d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv,
router = batadv_orig_router_get(orig_node, recv_if);
+ if (!router)
+ return router;
+
/* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
* and if activated.
*/
- if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) ||
- !router)
+ if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
return router;
/* bonding: loop through the list of possible routers found
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1f1de715197c..e2aa7be3a847 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
dst = NULL;
if (is_broadcast_ether_addr(dest)) {
- if (p->flags & BR_PROXYARP &&
+ if (IS_ENABLED(CONFIG_INET) &&
+ p->flags & BR_PROXYARP &&
skb->protocol == htons(ETH_P_ARP))
br_do_proxy_arp(skb, br, vid);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 15845814a0f2..ba6eb17226da 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au,
int ret;
char tmp_enc[40];
__le32 tmp[5] = {
- 16u, msg->hdr.crc, msg->footer.front_crc,
+ cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
msg->footer.middle_crc, msg->footer.data_crc,
};
ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a83062ceeec9..f2148e22b148 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len,
if (src_len != sizeof(u32) + dst_len)
return -EINVAL;
- buf_len = le32_to_cpu(*(u32 *)src);
+ buf_len = le32_to_cpu(*(__le32 *)src);
if (buf_len != dst_len)
return -EINVAL;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8e38f17288d3..8d614c93f86a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
case NDTPA_BASE_REACHABLE_TIME:
NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
nla_get_msecs(tbp[i]));
+			/* update reachable_time as well; otherwise the change
+			 * will only take effect the next time neigh_periodic_work
+			 * decides to recompute it (which can take multiple minutes)
+ */
+ p->reachable_time =
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
break;
case NDTPA_GC_STALETIME:
NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
+static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct neigh_parms *p = ctl->extra2;
+ int ret;
+
+ if (strcmp(ctl->procname, "base_reachable_time") == 0)
+ ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+ else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
+ ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+ else
+ ret = -1;
+
+ if (write && ret == 0) {
+		/* update reachable_time as well; otherwise the change will
+		 * only take effect the next time neigh_periodic_work
+		 * decides to recompute it
+ */
+ p->reachable_time =
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+ }
+ return ret;
+}
+
#define NEIGH_PARMS_DATA_OFFSET(index) \
(&((struct neigh_parms *) 0)->data[index])
@@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
/* ReachableTime (in milliseconds) */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+ } else {
+		/* These handlers update p->reachable_time after
+		 * base_reachable_time(_ms) is set, to ensure the new timer
+		 * starts being applied after the next neighbour update instead
+		 * of waiting for neigh_periodic_work to recompute it (which can
+		 * take multiple minutes). Any handler that replaces them
+		 * should do this as well.
+ */
+ /* ReachableTime */
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
+ neigh_proc_base_reachable_time;
+ /* ReachableTime (in milliseconds) */
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
+ neigh_proc_base_reachable_time;
}
/* Don't export sysctls to unprivileged users */
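The net/core/neighbour.c changes wrap the base_reachable_time(_ms) sysctls so that a successful write immediately recomputes the derived reachable_time instead of waiting minutes for neigh_periodic_work. The same set-then-refresh pattern in miniature (names illustrative; the kernel derives reachable_time from half the base value plus a random component up to the base):

#include <stdio.h>
#include <stdlib.h>

struct params {
	int base_reachable_time;
	int reachable_time;	/* randomized derivative of the base */
};

static int rand_reach_time(int base)
{
	return base / 2 + rand() % (base + 1);
}

static void set_base_reachable_time(struct params *p, int val)
{
	p->base_reachable_time = val;
	/* refresh immediately instead of waiting for a periodic worker */
	p->reachable_time = rand_reach_time(val);
}

int main(void)
{
	struct params p = { 0, 0 };

	set_base_reachable_time(&p, 30);
	printf("base=%d reachable=%d\n",
	       p.base_reachable_time, p.reachable_time);
	return 0;
}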
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index ff2d23d8c87a..6ecfce63201a 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
memset(&mr, 0, sizeof(mr));
if (priv->sreg_proto_min) {
- mr.range[0].min.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- mr.range[0].max.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ mr.range[0].min.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ mr.range[0].max.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7f18262e2326..65caf8b95e17 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
break;
- if (tso_segs == 1) {
+ if (tso_segs == 1 || !max_segs) {
if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
(tcp_skb_is_last(sk, skb) ?
nonagle : TCP_NAGLE_PUSH))))
@@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
limit = mss_now;
- if (tso_segs > 1 && !tcp_urg_mode(tp))
+ if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
min_t(unsigned int,
cwnd_quota,
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index 2433a6bfb191..11820b6b3613 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- range.max_proto.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ range.min_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ range.max_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0bb7038121ac..bd4e46ec32bd 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
sdata->crypto_tx_tailroom_needed_cnt--;
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta = key->sta;
sdata = key->sdata;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(sdata);
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(key->sdata);
}
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 1d5341f3761d..5d3daae98bf0 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct nf_conn *ct;
struct net *net;
+ *diff = 0;
+
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
* so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
return 1;
#endif
- *diff = 0;
-
/* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct ip_vs_conn *n_cp;
struct net *net;
+ /* no diff required for incoming packets */
+ *diff = 0;
+
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
* so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
return 1;
#endif
- /* no diff required for incoming packets */
- *diff = 0;
-
/* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a11674806707..46d1b26a468e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
*/
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
pr_debug("Confirming conntrack %p\n", ct);
- /* We have to check the DYING flag inside the lock to prevent
- a race against nf_ct_get_next_corpse() possibly called from
- user context, else we insert an already 'dead' hash, blocking
- further use of that particular connection -JM */
+ /* We have to check the DYING flag after unlink to prevent
+ * a race against nf_ct_get_next_corpse() possibly called from
+ * user context, else we insert an already 'dead' hash, blocking
+ * further use of that particular connection -JM.
+ */
+ nf_ct_del_from_dying_or_unconfirmed_list(ct);
- if (unlikely(nf_ct_is_dying(ct))) {
- nf_conntrack_double_unlock(hash, reply_hash);
- local_bh_enable();
- return NF_ACCEPT;
- }
+ if (unlikely(nf_ct_is_dying(ct)))
+ goto out;
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
goto out;
- nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
/* Timer relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
return NF_ACCEPT;
out:
+ nf_ct_add_to_dying_list(ct);
nf_conntrack_double_unlock(hash, reply_hash);
NF_CT_STAT_INC(net, insert_failed);
local_bh_enable();
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 129a8daa4abf..3b3ddb4fb9ee 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
struct nft_chain *chain, *nc;
struct nft_set *set, *ns;
- list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+ list_for_each_entry(chain, &ctx->table->chains, list) {
ctx->chain = chain;
err = nft_delrule_by_chain(ctx);
if (err < 0)
goto out;
-
- err = nft_delchain(ctx);
- if (err < 0)
- goto out;
}
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
goto out;
}
+ list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+ ctx->chain = chain;
+
+ err = nft_delchain(ctx);
+ if (err < 0)
+ goto out;
+ }
+
err = nft_deltable(ctx);
out:
return err;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index cde4a6702fa3..c421d94c4652 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -321,7 +321,8 @@ replay:
nlh = nlmsg_hdr(skb);
err = 0;
- if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+ if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+ skb->len < nlh->nlmsg_len) {
err = -EINVAL;
goto ack;
}
@@ -469,7 +470,7 @@ static int nfnetlink_bind(struct net *net, int group)
int type;
if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
- return -EINVAL;
+ return 0;
type = nfnl_group2type[group];
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index afe2b0b45ec4..aff54fb1c8a0 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
}
if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- range.max_proto.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ range.min_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ range.max_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 4e9a5f035cbc..b07349e82d78 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct vport *input_vport;
int len;
int err;
- bool log = !a[OVS_FLOW_ATTR_PROBE];
+ bool log = !a[OVS_PACKET_ATTR_PROBE];
err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+ [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
};
static const struct genl_ops dp_packet_genl_ops[] = {
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 70bef2ab7f2b..da2fae0873a5 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
{
struct flow_stats *stats;
int node = numa_node_id();
+ int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
stats = rcu_dereference(flow->stats[node]);
@@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
if (likely(new_stats)) {
new_stats->used = jiffies;
new_stats->packet_count = 1;
- new_stats->byte_count = skb->len;
+ new_stats->byte_count = len;
new_stats->tcp_flags = tcp_flags;
spin_lock_init(&new_stats->lock);
@@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
stats->used = jiffies;
stats->packet_count++;
- stats->byte_count += skb->len;
+ stats->byte_count += len;
stats->tcp_flags |= tcp_flags;
unlock:
spin_unlock(&stats->lock);
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 53f3ebbfceab..2034c6d9cb5a 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
stats = this_cpu_ptr(vport->percpu_stats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
u64_stats_update_end(&stats->syncp);
OVS_CB(skb)->input_vport = vport;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6880f34a529a..9cfe2e1dd8b5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
err = -EINVAL;
if (sock->type == SOCK_DGRAM) {
offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
- if (unlikely(offset) < 0)
+ if (unlikely(offset < 0))
goto out_free;
} else {
if (ll_header_truncated(dev, len))
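The af_packet.c fix is a misplaced parenthesis: unlikely(x) expands to __builtin_expect(!!(x), 0), which evaluates to 0 or 1, so the old form "unlikely(offset) < 0" could never be true and a negative return from dev_hard_header() was silently ignored. Demonstrated with a userspace stand-in:

#include <stdio.h>

/* userspace stand-in for the kernel's branch-prediction hint */
#define unlikely(x) __builtin_expect(!!(x), 0)

int main(void)
{
	int offset = -22;	/* e.g. an -EINVAL return value */

	/* buggy form: unlikely() yields 0 or 1, so "< 0" never fires */
	if (unlikely(offset) < 0)
		printf("buggy check caught the error\n");
	else
		printf("buggy check missed the error\n");

	/* fixed form: compare first, then hint the whole condition */
	if (unlikely(offset < 0))
		printf("fixed check caught the error\n");
	return 0;
}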
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 96ceefeb9daf..a9e174fc0f91 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
struct sk_buff *skb;
skb_queue_walk(&bcl->outqueue, skb) {
- if (more(buf_seqno(skb), after))
+ if (more(buf_seqno(skb), after)) {
+ tipc_link_retransmit(bcl, skb, mod(to - after));
break;
+ }
}
- tipc_link_retransmit(bcl, skb, mod(to - after));
}
/**
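The tipc/bcast.c hunk moves tipc_link_retransmit() inside the queue walk: once skb_queue_walk() runs to completion without a break, its cursor no longer points at a valid buffer, so the old post-loop call could act on garbage whenever no packet was newer than "after". The shape of the bug in plain C:

#include <stdio.h>

int main(void)
{
	int seqnos[] = { 10, 11, 12 };
	int after = 20;		/* nothing in the queue is newer */
	int found = 0;
	unsigned int i;

	for (i = 0; i < sizeof(seqnos) / sizeof(seqnos[0]); i++) {
		if (seqnos[i] > after) {
			/* fixed code: act only on a real match */
			printf("retransmit from %d\n", seqnos[i]);
			found = 1;
			break;
		}
	}
	if (!found)
		printf("nothing to retransmit\n");
	/* the old code called the retransmit unconditionally here,
	 * with the walk cursor no longer pointing at a valid buffer */
	return 0;
}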
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 1bca180db8ad..627f8cbbedb8 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -42,19 +42,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \
__clean-files := $(filter-out $(no-clean-files), $(__clean-files))
-# as clean-files is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# clean-files is given relative to the current directory, unless it
+# starts with $(objtree)/ (which means "./", so do not add "./" unless
+# you want to delete a file from the toplevel object directory).
__clean-files := $(wildcard \
- $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
- $(filter /%, $(__clean-files)))
+ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
+ $(filter $(objtree)/%, $(__clean-files)))
-# as clean-dirs is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# same as clean-files
__clean-dirs := $(wildcard \
- $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \
- $(filter /%, $(clean-dirs)))
+ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \
+ $(filter $(objtree)/%, $(clean-dirs)))
# ==========================================================================
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 9609a7f0faea..c7952375ac53 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
- key_user_put(key->user);
-
/* now throw away the key memory */
if (key->type->destroy)
key->type->destroy(key);
+ key_user_put(key->user);
+
kfree(key->description);
#ifdef KEY_DEBUGGING
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index 255dabc6fc33..2a85e4209f0b 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
spin_lock_irq(&efw->lock);
t = (struct snd_efw_transaction *)data;
- length = min_t(size_t, t->length * sizeof(t->length), length);
+ length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
if (efw->push_ptr < efw->pull_ptr)
capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5f13d2d18079..b422e406a9cb 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060");
MODULE_ALIAS("snd-hda-codec-id:10de0067");
MODULE_ALIAS("snd-hda-codec-id:10de0070");
MODULE_ALIAS("snd-hda-codec-id:10de0071");
+MODULE_ALIAS("snd-hda-codec-id:10de0072");
MODULE_ALIAS("snd-hda-codec-id:10de8001");
MODULE_ALIAS("snd-hda-codec-id:11069f80");
MODULE_ALIAS("snd-hda-codec-id:11069f81");
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 4f6413e01c13..605d14003d25 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec)
spec->gpio_mask;
}
if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
- spec->gpio_mask &= spec->gpio_mask;
- if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
spec->gpio_dir &= spec->gpio_mask;
+ if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+ spec->gpio_data &= spec->gpio_mask;
if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
spec->eapd_mask &= spec->gpio_mask;
if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 272844746135..327f8642ca80 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
return -EINVAL;
}
- if (cdev->n_streams < 2) {
+ if (cdev->n_streams < 1) {
dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams);
return -EINVAL;
}
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 6f803609e498..0b0112c80f22 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
*
* TODO: Hook into free() and add that check there as well.
*/
- debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex));
+ debug_check_no_locks_freed(mutex, sizeof(*mutex));
__del_lock(__get_lock(mutex));
return ll_pthread_mutex_destroy(mutex);
}
@@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
try_init_preload();
- debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock));
+ debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
__del_lock(__get_lock(rwlock));
return ll_pthread_rwlock_destroy(rwlock);
}
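In the lockdep preload fix, debug_check_no_locks_freed() expects a byte length, but "mutex + sizeof(*mutex)" performs pointer arithmetic and advances by sizeof(*mutex) elements, covering sizeof(*mutex) squared bytes. A two-printf demonstration of the scaling:

#include <stdio.h>
#include <pthread.h>

int main(void)
{
	pthread_mutex_t m;

	/* pointer arithmetic scales by element size: &m + sizeof(m)
	 * spans sizeof(m) * sizeof(m) bytes, not sizeof(m) bytes */
	printf("buggy span: %zu bytes\n",
	       (size_t)((char *)(&m + sizeof(m)) - (char *)&m));
	/* the fix passes the byte length directly */
	printf("fixed span: %zu bytes\n", sizeof(m));
	return 0;
}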
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e7417fe97a97..747f86103599 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -232,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
if (nr_samples > 0) {
total_nr_samples += nr_samples;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos))
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1ce425d101a9..1fd96c13f199 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
return __hist_entry__cmp_compute(p_left, p_right, c);
}
+static int64_t
+hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+ struct hist_entry *right __maybe_unused)
+{
+ return 0;
+}
+
+static int64_t
+hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+{
+ if (sort_compute)
+ return 0;
+
+ if (left->stat.period == right->stat.period)
+ return 0;
+ return left->stat.period > right->stat.period ? 1 : -1;
+}
+
+static int64_t
+hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+}
+
+static int64_t
+hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+}
+
+static int64_t
+hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+}
+
static void insert_hist_entry_by_compute(struct rb_root *root,
struct hist_entry *he,
int c)
@@ -605,7 +641,7 @@ static void hists__process(struct hists *hists)
hists__precompute(hists);
hists__compute_resort(hists);
} else {
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
}
hists__fprintf(hists, true, 0, 0, 0, stdout);
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
fmt->header = hpp__header;
fmt->width = hpp__width;
fmt->entry = hpp__entry_global;
+ fmt->cmp = hist_entry__cmp_nop;
+ fmt->collapse = hist_entry__cmp_nop;
/* TODO more colors */
switch (idx) {
case PERF_HPP_DIFF__BASELINE:
fmt->color = hpp__color_baseline;
+ fmt->sort = hist_entry__cmp_baseline;
break;
case PERF_HPP_DIFF__DELTA:
fmt->color = hpp__color_delta;
+ fmt->sort = hist_entry__cmp_delta;
break;
case PERF_HPP_DIFF__RATIO:
fmt->color = hpp__color_ratio;
+ fmt->sort = hist_entry__cmp_ratio;
break;
case PERF_HPP_DIFF__WEIGHTED_DIFF:
fmt->color = hpp__color_wdiff;
+ fmt->sort = hist_entry__cmp_wdiff;
break;
default:
+ fmt->sort = hist_entry__cmp_nop;
break;
}
init_header(d, dfmt);
perf_hpp__column_register(fmt);
+ perf_hpp__register_sort_field(fmt);
}
static void ui_init(void)
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 011195e38f21..198f3c3aff95 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -19,7 +19,9 @@
int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
{
int i;
- const struct option list_options[] = {
+ bool raw_dump = false;
+ struct option list_options[] = {
+ OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
OPT_END()
};
const char * const list_usage[] = {
@@ -27,11 +29,18 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
+ set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
+
argc = parse_options(argc, argv, list_options, list_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
setup_pager();
+ if (raw_dump) {
+ print_events(NULL, true);
+ return 0;
+ }
+
if (argc == 0) {
print_events(NULL, false);
return 0;
@@ -53,8 +62,6 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
print_hwcache_events(NULL, false);
else if (strcmp(argv[i], "pmu") == 0)
print_pmu_events(NULL, false);
- else if (strcmp(argv[i], "--raw-dump") == 0)
- print_events(NULL, true);
else {
char *sep = strchr(argv[i], ':'), *s;
int sep_idx;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 39367609c707..072ae8ad67fc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -457,6 +457,19 @@ static void report__collapse_hists(struct report *rep)
ui_progress__finish();
}
+static void report__output_resort(struct report *rep)
+{
+ struct ui_progress prog;
+ struct perf_evsel *pos;
+
+ ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
+
+ evlist__for_each(rep->session->evlist, pos)
+ hists__output_resort(evsel__hists(pos), &prog);
+
+ ui_progress__finish();
+}
+
static int __cmd_report(struct report *rep)
{
int ret;
@@ -505,13 +518,20 @@ static int __cmd_report(struct report *rep)
if (session_done())
return 0;
+ /*
+ * recalculate number of entries after collapsing since it
+ * might be changed during the collapse phase.
+ */
+ rep->nr_entries = 0;
+ evlist__for_each(session->evlist, pos)
+ rep->nr_entries += evsel__hists(pos)->nr_entries;
+
if (rep->nr_entries == 0) {
ui__error("The %s file has no samples!\n", file->path);
return 0;
}
- evlist__for_each(session->evlist, pos)
- hists__output_resort(evsel__hists(pos));
+ report__output_resort(rep);
return report__browse_hists(rep);
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 0aa7747ff139..961cea183a83 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -285,7 +285,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
}
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
hists__output_recalc_col_len(hists, top->print_entries - printed);
putchar('\n');
@@ -554,7 +554,7 @@ static void perf_top__sort_new_samples(void *arg)
}
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
}
static void *display_thread_tui(void *arg)
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 614d5c4978ab..8d110dec393e 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -187,7 +187,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
* function since TEST_ASSERT_VAL() returns in case of failure.
*/
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("use callchain: %d, cumulate callchain: %d\n",
@@ -454,12 +454,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
* 30.00% 10.00% perf perf [.] cmd_record
* 20.00% 0.00% bash libc [.] malloc
* 10.00% 10.00% bash [kernel] [k] page_fault
- * 10.00% 10.00% perf [kernel] [k] schedule
- * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
+ * 10.00% 10.00% bash bash [.] xmalloc
* 10.00% 10.00% perf [kernel] [k] page_fault
- * 10.00% 10.00% perf libc [.] free
* 10.00% 10.00% perf libc [.] malloc
- * 10.00% 10.00% bash bash [.] xmalloc
+ * 10.00% 10.00% perf [kernel] [k] schedule
+ * 10.00% 10.00% perf libc [.] free
+ * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
*/
struct result expected[] = {
{ 7000, 2000, "perf", "perf", "main" },
@@ -468,12 +468,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
{ 3000, 1000, "perf", "perf", "cmd_record" },
{ 2000, 0, "bash", "libc", "malloc" },
{ 1000, 1000, "bash", "[kernel]", "page_fault" },
- { 1000, 1000, "perf", "[kernel]", "schedule" },
- { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
+ { 1000, 1000, "bash", "bash", "xmalloc" },
{ 1000, 1000, "perf", "[kernel]", "page_fault" },
+ { 1000, 1000, "perf", "[kernel]", "schedule" },
{ 1000, 1000, "perf", "libc", "free" },
{ 1000, 1000, "perf", "libc", "malloc" },
- { 1000, 1000, "bash", "bash", "xmalloc" },
+ { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
};
symbol_conf.use_callchain = false;
@@ -537,10 +537,13 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* malloc
* main
*
- * 10.00% 10.00% perf [kernel] [k] schedule
+ * 10.00% 10.00% bash bash [.] xmalloc
* |
- * --- schedule
- * run_command
+ * --- xmalloc
+ * malloc
+ * xmalloc <--- NOTE: there's a cycle
+ * malloc
+ * xmalloc
* main
*
* 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
@@ -556,6 +559,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* run_command
* main
*
+ * 10.00% 10.00% perf [kernel] [k] schedule
+ * |
+ * --- schedule
+ * run_command
+ * main
+ *
* 10.00% 10.00% perf libc [.] free
* |
* --- free
@@ -570,15 +579,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* run_command
* main
*
- * 10.00% 10.00% bash bash [.] xmalloc
- * |
- * --- xmalloc
- * malloc
- * xmalloc <--- NOTE: there's a cycle
- * malloc
- * xmalloc
- * main
- *
*/
struct result expected[] = {
{ 7000, 2000, "perf", "perf", "main" },
@@ -587,12 +587,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ 3000, 1000, "perf", "perf", "cmd_record" },
{ 2000, 0, "bash", "libc", "malloc" },
{ 1000, 1000, "bash", "[kernel]", "page_fault" },
- { 1000, 1000, "perf", "[kernel]", "schedule" },
+ { 1000, 1000, "bash", "bash", "xmalloc" },
{ 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
{ 1000, 1000, "perf", "[kernel]", "page_fault" },
+ { 1000, 1000, "perf", "[kernel]", "schedule" },
{ 1000, 1000, "perf", "libc", "free" },
{ 1000, 1000, "perf", "libc", "malloc" },
- { 1000, 1000, "bash", "bash", "xmalloc" },
};
struct callchain_result expected_callchain[] = {
{
@@ -622,9 +622,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "bash", "main" }, },
},
{
- 3, { { "[kernel]", "schedule" },
- { "perf", "run_command" },
- { "perf", "main" }, },
+ 6, { { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "bash", "main" }, },
},
{
3, { { "[kernel]", "sys_perf_event_open" },
@@ -638,6 +641,11 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "perf", "main" }, },
},
{
+ 3, { { "[kernel]", "schedule" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
4, { { "libc", "free" },
{ "perf", "cmd_record" },
{ "perf", "run_command" },
@@ -649,14 +657,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "perf", "run_command" },
{ "perf", "main" }, },
},
- {
- 6, { { "bash", "xmalloc" },
- { "libc", "malloc" },
- { "bash", "xmalloc" },
- { "libc", "malloc" },
- { "bash", "xmalloc" },
- { "bash", "main" }, },
- },
};
symbol_conf.use_callchain = true;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 74f257a81265..59e53db7914c 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -138,7 +138,7 @@ int test__hists_filter(void)
struct hists *hists = evsel__hists(evsel);
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("Normal histogram\n");
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index a748f2be1222..f5547610da02 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -152,7 +152,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -252,7 +252,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -306,7 +306,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -384,7 +384,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -487,7 +487,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e6bb04b5b09b..788506eef567 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -550,7 +550,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
bool need_percent;
node = rb_first(root);
- need_percent = !!rb_next(node);
+ need_percent = node && rb_next(node);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
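
The one-line browser fix above matters because rb_first() returns NULL on an empty tree, and the old !!rb_next(node) called rb_next() unconditionally; the kernel's rb_next() dereferences its argument, so an empty callchain crashed the browser. A minimal sketch of the short-circuit, using a toy rb_next() rather than the kernel's:

    #include <stdio.h>
    #include <stddef.h>

    struct rb_node { struct rb_node *next; };

    /* Toy rb_next(): like the kernel's, it dereferences its argument
     * and must never be passed NULL. */
    static struct rb_node *rb_next(const struct rb_node *node)
    {
        return node->next;
    }

    int main(void)
    {
        struct rb_node *node = NULL;          /* rb_first() on an empty tree */
        int need_percent;

        /* need_percent = !!rb_next(node);       old code: NULL dereference */
        need_percent = node && rb_next(node);  /* fixed: && short-circuits  */

        printf("need_percent = %d\n", need_percent);
        return 0;
    }
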
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index dc0d095f318c..482adae3cc44 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -204,6 +204,9 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
if (ret)
return ret;
+ if (a->thread != b->thread || !symbol_conf.use_callchain)
+ return 0;
+
ret = b->callchain->max_depth - a->callchain->max_depth;
}
return ret;
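
The added guard pairs with the hist_entry__new() change later in this series: he->callchain is only allocated when symbol_conf.use_callchain is set, so tie-breaking on callchain->max_depth without the check would read memory that was never allocated for the entry. Schematically, with hypothetical simplified types in place of perf's:

    #include <stdbool.h>
    #include <stddef.h>

    struct callchain_root { int max_depth; };
    struct hist_entry {
        int thread;                       /* stand-in for struct thread * */
        struct callchain_root *callchain; /* absent unless callchains are on */
    };

    static bool use_callchain;

    static int sort_acc_tiebreak(const struct hist_entry *a,
                                 const struct hist_entry *b)
    {
        /* Without this guard, b->callchain may point at space that was
         * never allocated for the entry. */
        if (a->thread != b->thread || !use_callchain)
            return 0;

        return b->callchain->max_depth - a->callchain->max_depth;
    }

    int main(void)
    {
        struct hist_entry a = { 1, NULL }, b = { 2, NULL };

        return sort_acc_tiebreak(&a, &b); /* 0: different threads, no deref */
    }
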
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 2f612562978c..3c38f25b1695 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -1,5 +1,8 @@
#include <signal.h>
#include <stdbool.h>
+#ifdef HAVE_BACKTRACE_SUPPORT
+#include <execinfo.h>
+#endif
#include "../../util/cache.h"
#include "../../util/debug.h"
@@ -88,6 +91,25 @@ int ui__getch(int delay_secs)
return SLkp_getkey();
}
+#ifdef HAVE_BACKTRACE_SUPPORT
+static void ui__signal_backtrace(int sig)
+{
+ void *stackdump[32];
+ size_t size;
+
+ ui__exit(false);
+ psignal(sig, "perf");
+
+ printf("-------- backtrace --------\n");
+ size = backtrace(stackdump, ARRAY_SIZE(stackdump));
+ backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
+
+ exit(0);
+}
+#else
+# define ui__signal_backtrace ui__signal
+#endif
+
static void ui__signal(int sig)
{
ui__exit(false);
@@ -122,8 +144,8 @@ int ui__init(void)
ui_browser__init();
tui_progress__init();
- signal(SIGSEGV, ui__signal);
- signal(SIGFPE, ui__signal);
+ signal(SIGSEGV, ui__signal_backtrace);
+ signal(SIGFPE, ui__signal_backtrace);
signal(SIGINT, ui__signal);
signal(SIGQUIT, ui__signal);
signal(SIGTERM, ui__signal);
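
backtrace(3) and backtrace_symbols_fd(3) are the glibc calls used above; the _fd variant writes directly to a file descriptor without calling malloc(), which is the safer choice inside a signal handler. A standalone sketch of the same handler shape (assumes glibc; build with -rdynamic so the symbol names resolve; the exit code and message are illustrative, not perf's):

    #include <execinfo.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void signal_backtrace(int sig)
    {
        void *stackdump[32];
        int size;

        psignal(sig, "demo");

        printf("-------- backtrace --------\n");
        fflush(stdout);

        /* backtrace_symbols_fd() writes straight to the fd, avoiding
         * malloc() inside the handler. */
        size = backtrace(stackdump, 32);
        backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);

        _exit(1);
    }

    int main(void)
    {
        signal(SIGSEGV, signal_backtrace);
        raise(SIGSEGV);  /* demo: trigger the handler */
        return 0;
    }
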
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 64b377e591e4..14e7a123d43b 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -841,3 +841,33 @@ char *callchain_list__sym_name(struct callchain_list *cl,
return bf;
}
+
+static void free_callchain_node(struct callchain_node *node)
+{
+ struct callchain_list *list, *tmp;
+ struct callchain_node *child;
+ struct rb_node *n;
+
+ list_for_each_entry_safe(list, tmp, &node->val, list) {
+ list_del(&list->list);
+ free(list);
+ }
+
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = container_of(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+ rb_erase(&child->rb_node_in, &node->rb_root_in);
+
+ free_callchain_node(child);
+ free(child);
+ }
+}
+
+void free_callchain(struct callchain_root *root)
+{
+ if (!symbol_conf.use_callchain)
+ return;
+
+ free_callchain_node(&root->node);
+}
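
free_callchain_node() above combines two teardown idioms: list_for_each_entry_safe(), which tolerates deleting the element it is visiting, and an rb-tree drain that fetches n = rb_next(n) before rb_erase() so the iterator stays valid. The recursion then frees each subtree post-order, children before parent. A toy post-order teardown with the same shape, on a plain binary tree rather than the kernel rbtree:

    #include <stdlib.h>

    struct node {
        struct node *left, *right;
    };

    /* Post-order free: subtrees first, then the node itself -- the same
     * ordering as free_callchain_node() recursing before free(child). */
    static void free_tree(struct node *n)
    {
        if (!n)
            return;
        free_tree(n->left);
        free_tree(n->right);
        free(n);
    }

    int main(void)
    {
        struct node *root = calloc(1, sizeof(*root));

        root->left  = calloc(1, sizeof(*root));
        root->right = calloc(1, sizeof(*root));
        free_tree(root);
        return 0;
    }
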
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index dbc08cf5f970..c0ec1acc38e4 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -198,4 +198,6 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
char *callchain_list__sym_name(struct callchain_list *cl,
char *bf, size_t bfsize, bool show_dso);
+void free_callchain(struct callchain_root *root);
+
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 6e88b9e395df..182395546ddc 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -6,6 +6,7 @@
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
+#include "ui/progress.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -303,7 +304,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
size_t callchain_size = 0;
struct hist_entry *he;
- if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
+ if (symbol_conf.use_callchain)
callchain_size = sizeof(struct callchain_root);
he = zalloc(sizeof(*he) + callchain_size);
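
This hunk shrinks hist_entry allocations: cumulative-mode data lives in he->stat_acc, so the trailing callchain_root is appended only when callchains are actually in use. The trailing-space idiom it relies on, sketched with hypothetical names (a struct plus an optional payload in one zeroed block):

    #include <stdlib.h>

    struct callchain_root { int max_depth; };
    struct hist_entry { int dummy; /* ... real fields ... */ };

    /* By convention the callchain occupies the bytes right after the
     * struct, i.e. (void *)(he + 1), when it was allocated at all. */
    static struct hist_entry *entry_new(int use_callchain)
    {
        size_t callchain_size =
            use_callchain ? sizeof(struct callchain_root) : 0;

        return calloc(1, sizeof(struct hist_entry) + callchain_size);
    }

    int main(void)
    {
        struct hist_entry *he = entry_new(1);

        free(he);
        return 0;
    }
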
@@ -736,7 +737,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
iter->he = he;
he_cache[iter->curr++] = he;
- callchain_append(he->callchain, &callchain_cursor, sample->period);
+ hist_entry__append_callchain(he, sample);
/*
* We need to re-initialize the cursor since callchain_append()
@@ -809,7 +810,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
iter->he = he;
he_cache[iter->curr++] = he;
- callchain_append(he->callchain, &cursor, sample->period);
+ if (symbol_conf.use_callchain)
+ callchain_append(he->callchain, &cursor, sample->period);
return 0;
}
@@ -945,6 +947,7 @@ void hist_entry__free(struct hist_entry *he)
zfree(&he->mem_info);
zfree(&he->stat_acc);
free_srcline(he->srcline);
+ free_callchain(he->callchain);
free(he);
}
@@ -987,6 +990,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
else
p = &(*p)->rb_right;
}
+ hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
@@ -1024,7 +1028,10 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
if (!sort__need_collapse)
return;
+ hists->nr_entries = 0;
+
root = hists__get_rotate_entries_in(hists);
+
next = rb_first(root);
while (next) {
@@ -1119,7 +1126,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
rb_insert_color(&he->rb_node, entries);
}
-void hists__output_resort(struct hists *hists)
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
struct rb_root *root;
struct rb_node *next;
@@ -1148,6 +1155,9 @@ void hists__output_resort(struct hists *hists)
if (!n->filtered)
hists__calc_col_len(hists, n);
+
+ if (prog)
+ ui_progress__update(prog, 1);
}
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index d0ef9a19a744..46bd50344f85 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -121,7 +121,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
struct hists *hists);
void hist_entry__free(struct hist_entry *);
-void hists__output_resort(struct hists *hists);
+void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 28eb1417cb2a..7f9b8632e433 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -495,9 +495,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
}
if (ntevs == 0) { /* No error but failed to find probe point. */
- pr_warning("Probe point '%s' not found.\n",
+ pr_warning("Probe point '%s' not found in debuginfo.\n",
synthesize_perf_probe_point(&pev->point));
- return -ENOENT;
+ if (need_dwarf)
+ return -ENOENT;
+ return 0;
}
/* Error path : ntevs < 0 */
pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c7918f83b300..b5247d777f0e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -989,8 +989,24 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
int ret = 0;
#if _ELFUTILS_PREREQ(0, 142)
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+
/* Get the call frame information from this dwarf */
- pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg));
+ elf = dwarf_getelf(dbg->dbg);
+ if (elf == NULL)
+ return -EINVAL;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ return -EINVAL;
+
+ if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+ shdr.sh_type == SHT_PROGBITS) {
+ pf->cfi = dwarf_getcfi_elf(elf);
+ } else {
+ pf->cfi = dwarf_getcfi(dbg->dbg);
+ }
#endif
off = 0;
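
dwarf_getcfi_elf(3) reads call-frame information from the ELF .eh_frame section, while dwarf_getcfi(3) reads it from .debug_frame inside the DWARF data; the hunk above stops assuming a usable SHT_PROGBITS .eh_frame exists before choosing the former. A hedged sketch of the same selection using the real elfutils/libelf entry points, with a simplified section probe standing in for perf's elf_section_by_name() helper (build with -ldw -lelf):

    #include <elfutils/libdw.h>
    #include <gelf.h>
    #include <stdbool.h>
    #include <string.h>

    /* Simplified probe: walk the section headers looking for a
     * SHT_PROGBITS .eh_frame (perf uses its own helper for this). */
    static bool has_eh_frame(Elf *elf)
    {
        size_t shstrndx;
        Elf_Scn *scn = NULL;

        if (elf_getshdrstrndx(elf, &shstrndx) != 0)
            return false;

        while ((scn = elf_nextscn(elf, scn)) != NULL) {
            GElf_Shdr shdr;
            const char *name;

            if (gelf_getshdr(scn, &shdr) == NULL)
                continue;
            name = elf_strptr(elf, shstrndx, shdr.sh_name);
            if (name && !strcmp(name, ".eh_frame"))
                return shdr.sh_type == SHT_PROGBITS;
        }
        return false;
    }

    static Dwarf_CFI *get_cfi(Dwarf *dbg)
    {
        Elf *elf = dwarf_getelf(dbg);

        if (!elf)
            return NULL;

        /* Prefer .eh_frame when usable, else fall back to the
         * .debug_frame data carried in the DWARF info itself. */
        return has_eh_frame(elf) ? dwarf_getcfi_elf(elf)
                                 : dwarf_getcfi(dbg);
    }

    int main(void) { return 0; }  /* sketch only */
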
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index d273624c93a6..e238c9559caf 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags,
}
static int check_execveat_invoked_rc(int fd, const char *path, int flags,
- int expected_rc)
+ int expected_rc, int expected_rc2)
{
int status;
int rc;
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
child, status);
return 1;
}
- if (WEXITSTATUS(status) != expected_rc) {
- printf("[FAIL] (child %d exited with %d not %d)\n",
- child, WEXITSTATUS(status), expected_rc);
+ if ((WEXITSTATUS(status) != expected_rc) &&
+ (WEXITSTATUS(status) != expected_rc2)) {
+ printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
+ child, WEXITSTATUS(status), expected_rc, expected_rc2);
return 1;
}
printf("[OK]\n");
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
static int check_execveat(int fd, const char *path, int flags)
{
- return check_execveat_invoked_rc(fd, path, flags, 99);
+ return check_execveat_invoked_rc(fd, path, flags, 99, 99);
}
static char *concat(const char *left, const char *right)
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
* Execute as a long pathname relative to ".". If this is a script,
* the interpreter will launch but fail to open the script because its
* name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
+ *
+ * The failure code is usually 127 (POSIX: "If a command is not found,
+ * the exit status shall be 127."), but some systems give 126 (POSIX:
+ * "If the command name is found, but it is not an executable utility,
+ * the exit status shall be 126."), so allow either.
*/
if (is_script)
- fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127);
+ fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
+ 127, 126);
else
fail += check_execveat(dot_dfd, longpath, 0);
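
The 127-or-126 tolerance above exists because shells and interpreters disagree on which POSIX failure code an over-long script path produces. The essential wait-and-compare shape, standalone, with the exec failure reduced to a child that exits with the code directly:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static int check_rc(pid_t child, int expected_rc, int expected_rc2)
    {
        int status;

        if (waitpid(child, &status, 0) != child || !WIFEXITED(status))
            return 1;
        if (WEXITSTATUS(status) != expected_rc &&
            WEXITSTATUS(status) != expected_rc2) {
            printf("[FAIL] (child exited with %d not %d nor %d)\n",
                   WEXITSTATUS(status), expected_rc, expected_rc2);
            return 1;
        }
        printf("[OK]\n");
        return 0;
    }

    int main(void)
    {
        pid_t child = fork();

        if (child == 0)
            _exit(127);  /* stand-in for the failing exec */
        return check_rc(child, 127, 126);
    }
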
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index 94dae65eea41..8519e9ee97e3 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -536,10 +536,9 @@ int main(int argc, char *argv[])
{
struct mq_attr attr;
char *option, *next_option;
- int i, cpu;
+ int i, cpu, rc;
struct sigaction sa;
poptContext popt_context;
- char rc;
void *retval;
main_thread = pthread_self();
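
The char-to-int change above is a portability fix, not a style one: rc receives negative int return codes, and whether plain char is signed is implementation-defined (it is unsigned on ARM, PowerPC, and s390). There, (char)-1 stores as 255 and never compares equal to -1. A minimal demonstration; on x86, where char is signed, both tests happen to agree:

    #include <stdio.h>

    int main(void)
    {
        char rc_char = -1;  /* 255 where plain char is unsigned */
        int  rc_int  = -1;

        /* On an unsigned-char ABI the first test fails, the second
         * succeeds -- the bug the patch removes. */
        printf("char rc == -1 ? %s\n", rc_char == -1 ? "yes" : "no");
        printf("int  rc == -1 ? %s\n", rc_int  == -1 ? "yes" : "no");
        return 0;
    }
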
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 4c4b1f631ecf..077828c889f1 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress
all: $(BINARIES)
%: %.c
- $(CC) $(CFLAGS) -o $@ $^
+ $(CC) $(CFLAGS) -o $@ $^ -lrt
run_tests: all
@/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)
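
Appending -lrt matters for link order as well as for the library itself: libraries must follow the objects ($^) that reference them on the link line, and on older glibc several POSIX real-time interfaces live in librt rather than libc. A minimal caller that needs the trailing flag there, assuming a glibc older than 2.17 (where clock_gettime() had not yet moved into libc):

    #include <stdio.h>
    #include <time.h>

    /* Build: cc -o demo demo.c -lrt
     * On glibc < 2.17, dropping the trailing -lrt fails with an
     * undefined reference to clock_gettime. */
    int main(void)
    {
        struct timespec ts;

        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
            printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }
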