-rw-r--r--  .mailmap | 1
-rw-r--r--  Documentation/core-api/workqueue.rst | 6
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml | 2
-rw-r--r--  Documentation/translations/zh_CN/core-api/workqueue.rst | 398
-rw-r--r--  MAINTAINERS | 22
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 56
-rw-r--r--  arch/arm64/kvm/vgic/vgic-kvm-device.c | 8
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 6
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 6
-rw-r--r--  arch/s390/crypto/paes_s390.c | 15
-rw-r--r--  arch/s390/include/asm/dwarf.h | 1
-rw-r--r--  arch/s390/kernel/vdso64/vdso_user_wrapper.S | 2
-rw-r--r--  arch/s390/mm/gmap.c | 2
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 2
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 63
-rw-r--r--  arch/xtensa/include/asm/cacheflush.h | 24
-rw-r--r--  arch/xtensa/include/asm/processor.h | 8
-rw-r--r--  arch/xtensa/include/asm/ptrace.h | 2
-rw-r--r--  arch/xtensa/kernel/process.c | 5
-rw-r--r--  arch/xtensa/kernel/stacktrace.c | 3
-rw-r--r--  arch/xtensa/platforms/iss/console.c | 6
-rw-r--r--  drivers/firewire/nosy.c | 6
-rw-r--r--  drivers/firewire/ohci.c | 8
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 6
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 14
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 49
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 34
-rw-r--r--  drivers/pinctrl/core.c | 8
-rw-r--r--  drivers/pinctrl/devicetree.c | 10
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 78
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.h | 4
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-paris.c | 40
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-a1.c | 6
-rw-r--r--  drivers/pinctrl/renesas/pinctrl-rzg2l.c | 14
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 1
-rw-r--r--  drivers/power/supply/mt6360_charger.c | 2
-rw-r--r--  drivers/power/supply/rt9455_charger.c | 2
-rw-r--r--  drivers/regulator/irq_helpers.c | 3
-rw-r--r--  drivers/regulator/mt6360-regulator.c | 32
-rw-r--r--  drivers/regulator/qcom-refgen-regulator.c | 1
-rw-r--r--  drivers/regulator/vqmmc-ipq4019-regulator.c | 1
-rw-r--r--  drivers/s390/char/raw3270.c | 6
-rw-r--r--  drivers/s390/cio/cio_inject.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.c | 6
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.c | 48
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 61
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/spi/spi-axi-spi-engine.c | 2
-rw-r--r--  drivers/spi/spi-hisi-kunpeng.c | 2
-rw-r--r--  drivers/spi/spi.c | 1
-rw-r--r--  drivers/thermal/thermal_debugfs.c | 59
-rw-r--r--  fs/bcachefs/btree_node_scan.c | 7
-rw-r--r--  fs/bcachefs/btree_node_scan_types.h | 1
-rw-r--r--  fs/bcachefs/buckets.c | 2
-rw-r--r--  fs/bcachefs/inode.c | 2
-rw-r--r--  fs/btrfs/ioctl.c | 33
-rw-r--r--  fs/btrfs/ordered-data.c | 1
-rw-r--r--  fs/btrfs/qgroup.c | 21
-rw-r--r--  fs/btrfs/volumes.c | 1
-rw-r--r--  fs/erofs/fscache.c | 2
-rw-r--r--  fs/erofs/internal.h | 7
-rw-r--r--  fs/erofs/super.c | 124
-rw-r--r--  fs/nfs/inode.c | 7
-rw-r--r--  fs/nfsd/nfs4xdr.c | 2
-rw-r--r--  include/linux/filter.h | 1
-rw-r--r--  include/linux/regulator/consumer.h | 4
-rw-r--r--  include/linux/skmsg.h | 2
-rw-r--r--  include/net/gro.h | 9
-rw-r--r--  kernel/bounds.c | 2
-rw-r--r--  kernel/bpf/core.c | 9
-rw-r--r--  kernel/bpf/verifier.c | 33
-rw-r--r--  kernel/workqueue.c | 19
-rw-r--r--  lib/Kconfig.debug | 5
-rw-r--r--  lib/scatterlist.c | 2
-rw-r--r--  net/8021q/vlan_core.c | 2
-rw-r--r--  net/bridge/br_forward.c | 2
-rw-r--r--  net/core/filter.c | 42
-rw-r--r--  net/core/gro.c | 1
-rw-r--r--  net/core/skbuff.c | 27
-rw-r--r--  net/core/skmsg.c | 5
-rw-r--r--  net/ipv4/af_inet.c | 1
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/raw.c | 3
-rw-r--r--  net/ipv4/udp.c | 3
-rw-r--r--  net/ipv4/udp_offload.c | 15
-rw-r--r--  net/ipv6/ip6_offload.c | 1
-rw-r--r--  net/ipv6/udp.c | 3
-rw-r--r--  net/ipv6/udp_offload.c | 3
-rw-r--r--  net/l2tp/l2tp_eth.c | 3
-rw-r--r--  net/mptcp/protocol.c | 3
-rw-r--r--  net/nsh/nsh.c | 14
-rw-r--r--  net/rxrpc/conn_object.c | 9
-rw-r--r--  net/rxrpc/insecure.c | 2
-rw-r--r--  net/rxrpc/rxkad.c | 2
-rw-r--r--  net/rxrpc/txbuf.c | 10
-rw-r--r--  net/sunrpc/xprtsock.c | 1
-rw-r--r--  net/tipc/msg.c | 8
-rw-r--r--  tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 3
-rw-r--r--  tools/testing/selftests/kvm/aarch64/vgic_init.c | 49
108 files changed, 1217 insertions, 503 deletions
diff --git a/.mailmap b/.mailmap
index 16b704e1d5d3..9f41b3906b66 100644
--- a/.mailmap
+++ b/.mailmap
@@ -512,6 +512,7 @@ Praveen BP <praveenbp@ti.com>
Pradeep Kumar Chitrapu <quic_pradeepc@quicinc.com> <pradeepc@codeaurora.org>
Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org>
Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
+Puranjay Mohan <puranjay@kernel.org> <puranjay12@gmail.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index ed73c612174d..bcc370c876be 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -671,7 +671,7 @@ configuration, worker pools and how workqueues map to the pools: ::
events_unbound unbound 9 9 10 10 8
events_freezable percpu 0 2 4 6
events_power_efficient percpu 0 2 4 6
- events_freezable_power_ percpu 0 2 4 6
+ events_freezable_pwr_ef percpu 0 2 4 6
rcu_gp percpu 0 2 4 6
rcu_par_gp percpu 0 2 4 6
slub_flushwq percpu 0 2 4 6
@@ -694,7 +694,7 @@ Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
events_unbound 38306 0 0.1 - 7 - -
events_freezable 0 0 0.0 0 0 - -
events_power_efficient 29598 0 0.2 0 0 - -
- events_freezable_power_ 10 0 0.0 0 0 - -
+ events_freezable_pwr_ef 10 0 0.0 0 0 - -
sock_diag_events 0 0 0.0 0 0 - -
total infl CPUtime CPUhog CMW/RPR mayday rescued
@@ -704,7 +704,7 @@ Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
events_unbound 38322 0 0.1 - 7 - -
events_freezable 0 0 0.0 0 0 - -
events_power_efficient 29603 0 0.2 0 0 - -
- events_freezable_power_ 10 0 0.0 0 0 - -
+ events_freezable_pwr_ef 10 0 0.0 0 0 - -
sock_diag_events 0 0 0.0 0 0 - -
...
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
index d476de82e5c3..4d5a957fa232 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
@@ -120,7 +120,9 @@ additionalProperties:
slew-rate: true
gpio-hog: true
gpios: true
+ input: true
input-enable: true
+ output-enable: true
output-high: true
output-low: true
line-name: true
diff --git a/Documentation/translations/zh_CN/core-api/workqueue.rst b/Documentation/translations/zh_CN/core-api/workqueue.rst
index 7fac6f75d078..fe0ff5a127f3 100644
--- a/Documentation/translations/zh_CN/core-api/workqueue.rst
+++ b/Documentation/translations/zh_CN/core-api/workqueue.rst
@@ -7,12 +7,13 @@
å¸å»¶è…¾ Yanteng Si <siyanteng@loongson.cn>
周彬彬 Binbin Zhou <zhoubinbin@loongson.cn>
+ é™ˆå…´å‹ Xingyou Chen <rockrush@rockwork.org>
.. _cn_workqueue.rst:
-=========================
-并å‘管ç†çš„工作队列 (cmwq)
-=========================
+========
+工作队列
+========
:日期: September, 2010
:作者: Tejun Heo <tj@kernel.org>
@@ -22,7 +23,7 @@
简介
====
-在很多情况下,需è¦ä¸€ä¸ªå¼‚步进程的执行环境,工作队列(wq)API是这ç§æƒ…况下
+在很多情况下,需è¦ä¸€ä¸ªå¼‚步的程åºæ‰§è¡ŒçŽ¯å¢ƒï¼Œå·¥ä½œé˜Ÿåˆ—(wq)API是这ç§æƒ…况下
最常用的机制。
当需è¦è¿™æ ·ä¸€ä¸ªå¼‚步执行上下文时,一个æè¿°å°†è¦æ‰§è¡Œçš„函数的工作项(work,
@@ -34,8 +35,8 @@
队列时,工作者åˆå¼€å§‹æ‰§è¡Œã€‚
-为什么è¦cmwq?
-=============
+为什么è¦æœ‰å¹¶å‘管ç†å·¥ä½œé˜Ÿåˆ—?
+===========================
在最åˆçš„wq实现中,多线程(MT)wq在æ¯ä¸ªCPU上有一个工作者线程,而å•çº¿ç¨‹
(ST)wq在全系统有一个工作者线程。一个MT wq需è¦ä¿æŒä¸ŽCPUæ•°é‡ç›¸åŒçš„å·¥
@@ -73,9 +74,11 @@
å‘该函数的工作项,并在工作队列中排队等待该工作项。(就是挂到workqueue
队列里é¢åŽ»ï¼‰
-特定目的线程,称为工作线程(工作者),一个接一个地执行队列中的功能。
-如果没有工作项排队,工作者线程就会闲置。这些工作者线程被管ç†åœ¨æ‰€è°“
-的工作者池中。
+工作项å¯ä»¥åœ¨çº¿ç¨‹æˆ–BH(软中断)上下文中执行。
+
+对于由线程执行的工作队列,被称为(内核)工作者([k]worker)的特殊
+线程会ä¾æ¬¡æ‰§è¡Œå…¶ä¸­çš„函数。如果没有工作项排队,工作者线程就会闲置。
+这些工作者线程被管ç†åœ¨æ‰€è°“的工作者池中。
cmwq设计区分了é¢å‘用户的工作队列,å­ç³»ç»Ÿå’Œé©±åŠ¨ç¨‹åºåœ¨ä¸Šé¢æŽ’队工作,
以åŠç®¡ç†å·¥ä½œè€…池和处ç†æŽ’队工作项的åŽç«¯æœºåˆ¶ã€‚
@@ -84,6 +87,10 @@ cmwq设计区分了é¢å‘用户的工作队列,å­ç³»ç»Ÿå’Œé©±åŠ¨ç¨‹åºåœ¨ä¸Šé
优先级的工作项,还有一些é¢å¤–的工作者池,用于æœåŠ¡æœªç»‘定工作队列的工
作项目——这些åŽå¤‡æ± çš„æ•°é‡æ˜¯åŠ¨æ€çš„。
+BH工作队列使用相åŒçš„结构。然而,由于åŒä¸€æ—¶é—´åªå¯èƒ½æœ‰ä¸€ä¸ªæ‰§è¡Œä¸Šä¸‹æ–‡ï¼Œ
+ä¸éœ€è¦æ‹…心并å‘问题。æ¯ä¸ªCPU上的BH工作者池åªåŒ…å«ä¸€ä¸ªç”¨äºŽè¡¨ç¤ºBH执行
+上下文的虚拟工作者。BH工作队列å¯ä»¥è¢«çœ‹ä½œè½¯ä¸­æ–­çš„便æ·æŽ¥å£ã€‚
+
当他们认为åˆé€‚的时候,å­ç³»ç»Ÿå’Œé©±åŠ¨ç¨‹åºå¯ä»¥é€šè¿‡ç‰¹æ®Šçš„
``workqueue API`` 函数创建和排队工作项。他们å¯ä»¥é€šè¿‡åœ¨å·¥ä½œé˜Ÿåˆ—上
设置标志æ¥å½±å“工作项执行方å¼çš„æŸäº›æ–¹é¢ï¼Œä»–们把工作项放在那里。这些
@@ -95,9 +102,9 @@ cmwq设计区分了é¢å‘用户的工作队列,å­ç³»ç»Ÿå’Œé©±åŠ¨ç¨‹åºåœ¨ä¸Šé
å¦åˆ™ä¸€ä¸ªç»‘定的工作队列的工作项将被排在与å‘起线程è¿è¡Œçš„CPU相关的普
通或高级工作工作者池的工作项列表中。
-对于任何工作者池的实施,管ç†å¹¶å‘水平(有多少执行上下文处于活动状
-æ€ï¼‰æ˜¯ä¸€ä¸ªé‡è¦é—®é¢˜ã€‚最低水平是为了节çœèµ„æºï¼Œè€Œé¥±å’Œæ°´å¹³æ˜¯æŒ‡ç³»ç»Ÿè¢«
-充分使用。
+对于任何线程池的实施,管ç†å¹¶å‘水平(有多少执行上下文处于活动状
+æ€ï¼‰æ˜¯ä¸€ä¸ªé‡è¦é—®é¢˜ã€‚cmwq试图将并å‘ä¿æŒåœ¨ä¸€ä¸ªå°½å¯èƒ½ä½Žä¸”充足的
+水平。最低水平是为了节çœèµ„æºï¼Œè€Œå……足是为了使系统能被充分使用。
æ¯ä¸ªä¸Žå®žé™…CPU绑定的worker-pool通过钩ä½è°ƒåº¦å™¨æ¥å®žçŽ°å¹¶å‘管ç†ã€‚æ¯å½“
一个活动的工作者被唤醒或ç¡çœ æ—¶ï¼Œå·¥ä½œè€…池就会得到通知,并跟踪当å‰å¯
@@ -140,6 +147,17 @@ workqueue将自动创建与属性相匹é…çš„åŽå¤‡å·¥ä½œè€…池。调节并å‘æ°
``flags``
---------
+``WQ_BH``
+ BH工作队列å¯ä»¥è¢«çœ‹ä½œè½¯ä¸­æ–­çš„便æ·æŽ¥å£ã€‚它总是æ¯ä¸ªCPU一份,
+ 其中的å„个工作项也会按在队列中的顺åºï¼Œè¢«æ‰€å±žCPU在软中断
+ 上下文中执行。
+
+ BH工作队列的 ``max_active`` 值必须为0,且åªèƒ½å•ç‹¬æˆ–å’Œ
+ ``WQ_HIGHPRI`` 标志组åˆä½¿ç”¨ã€‚
+
+ BH工作项ä¸å¯ä»¥ç¡çœ ã€‚åƒå»¶è¿ŸæŽ’队ã€å†²æ´—ã€å–消等所有其他特性
+ 都是支æŒçš„。
+
``WQ_UNBOUND``
排队到éžç»‘定wq的工作项由特殊的工作者池æä¾›æœåŠ¡ï¼Œè¿™äº›å·¥ä½œè€…ä¸
绑定在任何特定的CPU上。这使得wq表现得åƒä¸€ä¸ªç®€å•çš„执行环境æ
@@ -184,25 +202,21 @@ workqueue将自动创建与属性相匹é…çš„åŽå¤‡å·¥ä½œè€…池。调节并å‘æ°
--------------
``@max_active`` 决定了æ¯ä¸ªCPUå¯ä»¥åˆ†é…ç»™wq的工作项的最大执行上
-下文数é‡ã€‚例如,如果 ``@max_active为16`` ,æ¯ä¸ªCPU最多å¯ä»¥åŒ
-时执行16个wq的工作项。
+下文数é‡ã€‚例如,如果 ``@max_active`` 为16 ,æ¯ä¸ªCPU最多å¯ä»¥åŒ
+时执行16个wq的工作项。它总是æ¯CPU属性,å³ä¾¿å¯¹äºŽæœªç»‘定 wq。
-ç›®å‰ï¼Œå¯¹äºŽä¸€ä¸ªç»‘定的wq, ``@max_active`` 的最大é™åˆ¶æ˜¯512,当指
-定为0时使用的默认值是256。对于éžç»‘定的wq,其é™åˆ¶æ˜¯512å’Œ
-4 * ``num_possible_cpus()`` 中的较高值。这些值被选得足够高,所
-以它们ä¸æ˜¯é™åˆ¶æ€§å› ç´ ï¼ŒåŒæ—¶ä¼šåœ¨å¤±æŽ§æƒ…况下æä¾›ä¿æŠ¤ã€‚
+``@max_active`` 的最大é™åˆ¶æ˜¯512,当指定为0时使用的默认值是256。
+这些值被选得足够高,所以它们ä¸æ˜¯é™åˆ¶æ€§å› ç´ ï¼ŒåŒæ—¶ä¼šåœ¨å¤±æŽ§æƒ…况下æä¾›
+ä¿æŠ¤ã€‚
一个wq的活动工作项的数é‡é€šå¸¸ç”±wq的用户æ¥è°ƒèŠ‚,更具体地说,是由用
户在åŒä¸€æ—¶é—´å¯ä»¥æŽ’列多少个工作项æ¥è°ƒèŠ‚。除éžæœ‰ç‰¹å®šçš„需求æ¥æŽ§åˆ¶æ´»åŠ¨
工作项的数é‡ï¼Œå¦åˆ™å»ºè®®æŒ‡å®š 为"0"。
-一些用户ä¾èµ–于ST wq的严格执行顺åºã€‚ ``@max_active`` 为1å’Œ ``WQ_UNBOUND``
-的组åˆç”¨æ¥å®žçŽ°è¿™ç§è¡Œä¸ºã€‚è¿™ç§wq上的工作项目总是被排到未绑定的工作池
-中,并且在任何时候都åªæœ‰ä¸€ä¸ªå·¥ä½œé¡¹ç›®å¤„于活动状æ€ï¼Œä»Žè€Œå®žçŽ°ä¸ŽST wq相
-åŒçš„排åºå±žæ€§ã€‚
-
-在目å‰çš„实现中,上述é…ç½®åªä¿è¯äº†ç‰¹å®šNUMA节点内的ST行为。相å,
-``alloc_ordered_workqueue()`` 应该被用æ¥å®žçŽ°å…¨ç³»ç»Ÿçš„ST行为。
+一些用户ä¾èµ–于任æ„时刻最多åªæœ‰ä¸€ä¸ªå·¥ä½œé¡¹è¢«æ‰§è¡Œï¼Œä¸”å„工作项被按队列中
+顺åºå¤„ç†å¸¦æ¥çš„严格执行顺åºã€‚``@max_active`` 为1å’Œ ``WQ_UNBOUND``
+的组åˆæ›¾è¢«ç”¨æ¥å®žçŽ°è¿™ç§è¡Œä¸ºï¼ŒçŽ°åœ¨ä¸ç”¨äº†ã€‚请使用
+``alloc_ordered_workqueue()`` 。
执行场景示例
@@ -285,7 +299,7 @@ And with cmwq with ``@max_active`` >= 3, ::
* 除éžæœ‰ç‰¹æ®Šéœ€è¦ï¼Œå»ºè®®ä½¿ç”¨0作为@max_active。在大多数使用情
况下,并å‘水平通常ä¿æŒåœ¨é»˜è®¤é™åˆ¶ä¹‹ä¸‹ã€‚
-* 一个wq作为å‰è¿›è¿›åº¦ä¿è¯ï¼ˆWQ_MEM_RECLAIM,冲洗(flush)和工
+* 一个wq作为å‰è¿›è¿›åº¦ä¿è¯ï¼Œ``WQ_MEM_RECLAIM`` ,冲洗(flush)和工
作项属性的域。ä¸æ¶‰åŠå†…存回收的工作项,ä¸éœ€è¦ä½œä¸ºå·¥ä½œé¡¹ç»„的一
部分被刷新,也ä¸éœ€è¦ä»»ä½•ç‰¹æ®Šå±žæ€§ï¼Œå¯ä»¥ä½¿ç”¨ç³»ç»Ÿä¸­çš„一个wq。使
用专用wq和系统wq在执行特性上没有区别。
@@ -294,6 +308,337 @@ And with cmwq with ``@max_active`` >= 3, ::
益的,因为wqæ“作和工作项执行中的定ä½æ°´å¹³æ高了。
+亲和性作用域
+============
+
+一个éžç»‘定工作队列根æ®å…¶äº²å’Œæ€§ä½œç”¨åŸŸæ¥å¯¹CPU进行分组以æ高缓存
+局部性。比如如果一个工作队列使用默认的“cacheâ€äº²å’Œæ€§ä½œç”¨åŸŸï¼Œ
+它将根æ®æœ€åŽä¸€çº§ç¼“存的边界æ¥åˆ†ç»„处ç†å™¨ã€‚这个工作队列上的工作项
+将被分é…给一个与å‘èµ·CPU共用最åŽçº§ç¼“存的处ç†å™¨ä¸Šçš„工作者。根æ®
+``affinity_strict`` 的设置,工作者在å¯åŠ¨åŽå¯èƒ½è¢«å…许移出
+所在作用域,也å¯èƒ½ä¸è¢«å…许。
+
+工作队列目å‰æ”¯æŒä»¥ä¸‹äº²å’Œæ€§ä½œç”¨åŸŸã€‚
+
+``default``
+ 使用模å—å‚æ•° ``workqueue.default_affinity_scope`` 指定
+ 的作用域,该å‚数总是会被设为以下作用域中的一个。
+
+``cpu``
+ CPUä¸è¢«åˆ†ç»„。一个CPU上å‘起的工作项会被åŒä¸€CPU上的工作者执行。
+ 这使éžç»‘定工作队列表现得åƒæ˜¯ä¸å«å¹¶å‘管ç†çš„æ¯CPU工作队列。
+
+``smt``
+ CPU被按SMT边界分组。这通常æ„味ç€æ¯ä¸ªç‰©ç†CPU核上的å„逻辑CPU会
+ 被分进åŒä¸€ç»„。
+
+``cache``
+ CPU被按缓存边界分组。采用哪个缓存边界由架构代ç å†³å®šã€‚很多情况
+ 下会使用L3。这是默认的亲和性作用域。
+
+``numa``
+ CPU被按NUMA边界分组。
+
+``system``
+ 所有CPU被放在åŒä¸€ç»„。工作队列ä¸å°è¯•åœ¨ä¸´è¿‘å‘èµ·CPUçš„CPU上è¿è¡Œ
+ 工作项。
+
+默认的亲和性作用域å¯ä»¥è¢«æ¨¡å—å‚æ•° ``workqueue.default_affinity_scope``
+修改,特定工作队列的亲和性作用域å¯ä»¥é€šè¿‡ ``apply_workqueue_attrs()``
+被更改。
+
+如果设置了 ``WQ_SYSFS`` ,工作队列会在它的 ``/sys/devices/virtual/workqueue/WQ_NAME/``
+目录中有以下亲和性作用域相关的接å£æ–‡ä»¶ã€‚
+
+``affinity_scope``
+ 读æ“作以查看当å‰çš„亲和性作用域。写æ“作用于更改设置。
+
+ 当å‰ä½œç”¨åŸŸæ˜¯é»˜è®¤å€¼æ—¶ï¼Œå½“å‰ç”Ÿæ•ˆçš„作用域也å¯ä»¥è¢«ä»Žè¿™ä¸ªæ–‡ä»¶ä¸­
+ 读到(å°æ‹¬å·å†…),例如 ``default (cache)`` 。
+
+``affinity_strict``
+ 默认值0表明亲和性作用域ä¸æ˜¯ä¸¥æ ¼çš„。当一个工作项开始执行时,
+ 工作队列尽é‡å°è¯•ä½¿å·¥ä½œè€…处于亲和性作用域内,称为é£è¿”。å¯åŠ¨åŽï¼Œ
+ 调度器å¯ä»¥è‡ªç”±åœ°å°†å·¥ä½œè€…调度到系统中任æ„它认为åˆé€‚的地方去。
+ 这使得在ä¿ç•™ä½¿ç”¨å…¶ä»–CPU(如果必需且有å¯ç”¨ï¼‰èƒ½åŠ›çš„åŒæ—¶ï¼Œ
+ 还能从作用域局部性上获益。
+
+ 如果设置为1,作用域内的所有工作者将被ä¿è¯æ€»æ˜¯å¤„于作用域内。
+ 这在跨亲和性作用域会导致如功耗ã€è´Ÿè½½éš”离等方é¢çš„潜在影å“æ—¶
+ 会有用。严格的NUMA作用域也å¯ç”¨äºŽå’Œæ—§ç‰ˆå†…核中工作队列的行为
+ ä¿æŒä¸€è‡´ã€‚
+
+
+亲和性作用域与性能
+==================
+
+如果éžç»‘定工作队列的行为对ç»å¤§å¤šæ•°ä½¿ç”¨åœºæ™¯æ¥è¯´éƒ½æ˜¯æœ€ä¼˜çš„,
+ä¸éœ€è¦æ›´å¤šè°ƒèŠ‚,就完美了。很ä¸å¹¸ï¼Œåœ¨å½“å‰å†…核中,é‡åº¦ä½¿ç”¨
+工作队列时,需è¦åœ¨å±€éƒ¨æ€§å’Œåˆ©ç”¨çŽ‡é—´æ˜¾å¼åœ°ä½œä¸€ä¸ªæ˜Žæ˜¾çš„æƒè¡¡ã€‚
+
+更高的局部性带æ¥æ›´é«˜æ•ˆçŽ‡ï¼Œä¹Ÿå°±æ˜¯ç›¸åŒæ•°é‡çš„CPU周期内å¯ä»¥åš
+更多工作。然而,如果å‘起者没能将工作项充分地分散在亲和性
+作用域间,更高的局部性也å¯èƒ½å¸¦æ¥æ›´ä½Žçš„整体系统利用率。以下
+dm-crypt 的性能测试清楚地é˜æ˜Žäº†è¿™ä¸€å–èˆã€‚
+
+测试è¿è¡Œåœ¨ä¸€ä¸ª12æ ¸24线程ã€4个L3缓存的处ç†å™¨ï¼ˆAMD Ryzen
+9 3900x)上。为ä¿æŒä¸€è‡´æ€§ï¼Œå…³é—­CPU超频。 ``/dev/dm-0``
+是NVME SSD(三星 990 PRO)上创建,用 ``cryptsetup``
+以默认é…置打开的一个 dm-crypt 设备。
+
+
+场景 1: 机器上é布ç€æœ‰å……足的å‘起者和工作é‡
+------------------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k --ioengine=libaio \
+ --iodepth=64 --runtime=60 --numjobs=24 --time_based --group_reporting \
+ --name=iops-test-job --verify=sha512
+
+这里有24个å‘起者,æ¯ä¸ªåŒæ—¶å‘èµ·64个IO。 ``--verify=sha512``
+使得 ``fio`` æ¯æ¬¡ç”Ÿæˆå’Œè¯»å›žå†…容å—å‘起者和 ``kcryptd``
+间的执行局部性影å“。下é¢æ˜¯åŸºäºŽä¸åŒ ``kcryptd`` 的亲和性
+作用域设置,å„ç»è¿‡äº”次测试得到的读å–带宽和CPU利用率数æ®ã€‚
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 1159.40 ±1.34
+ - 99.31 ±0.02
+
+ * - cache
+ - 1166.40 ±0.89
+ - 99.34 ±0.01
+
+ * - cache (strict)
+ - 1166.00 ±0.71
+ - 99.35 ±0.01
+
+在系统中分布ç€è¶³å¤Ÿå¤šå‘起者的情况下,ä¸è®ºä¸¥æ ¼ä¸Žå¦ï¼Œâ€œcacheâ€
+没有表现得更差。三ç§é…ç½®å‡ä½¿æ•´ä¸ªæœºå™¨è¾¾åˆ°é¥±å’Œï¼Œä½†ç”±äºŽæ高了
+局部性,缓存相关的两ç§æœ‰0.6%的(带宽)æå‡ã€‚
+
+
+场景 2: æ›´å°‘å‘起者,足以达到饱和的工作é‡
+----------------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+ --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=8 \
+ --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+与上一个场景唯一的区别是 ``--numjobs=8``。 å‘起者数é‡
+å‡å°‘为三分之一,但ä»ç„¶æœ‰è¶³ä»¥ä½¿ç³»ç»Ÿè¾¾åˆ°é¥±å’Œçš„工作总é‡ã€‚
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 1155.40 ±0.89
+ - 97.41 ±0.05
+
+ * - cache
+ - 1154.40 ±1.14
+ - 96.15 ±0.09
+
+ * - cache (strict)
+ - 1112.00 ±4.64
+ - 93.26 ±0.35
+
+这里有超过使系统达到饱和所需的工作é‡ã€‚“systemâ€å’Œâ€œcacheâ€
+都接近但并未使机器完全饱和。“cacheâ€æ¶ˆè€—æ›´å°‘çš„CPU但更高的
+效率使其得到和“systemâ€ç›¸åŒçš„带宽。
+
+八个å‘起者盘桓在四个L3缓存作用域间ä»ç„¶å…许“cache (strict)â€
+几乎使机器饱和,但缺少对工作的ä¿æŒï¼ˆä¸ç§»åˆ°ç©ºé—²å¤„ç†å™¨ä¸Šï¼‰
+开始带æ¥3.7%的带宽æŸå¤±ã€‚
+
+
+场景 3: æ›´å°‘å‘起者,ä¸å……足的工作é‡
+----------------------------------
+
+使用命令:::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+ --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=4 \
+ --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+å†æ¬¡ï¼Œå”¯ä¸€çš„区别是 ``--numjobs=4``。由于å‘起者å‡å°‘到四个,
+现在没有足以使系统饱和的工作é‡ï¼Œå¸¦å®½å˜å¾—ä¾èµ–于完æˆæ—¶å»¶ã€‚
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - 亲和性
+ - 带宽 (MiBps)
+ - CPU利用率(%)
+
+ * - system
+ - 993.60 ±1.82
+ - 75.49 ±0.06
+
+ * - cache
+ - 973.40 ±1.52
+ - 74.90 ±0.07
+
+ * - cache (strict)
+ - 828.20 ±4.49
+ - 66.84 ±0.29
+
+现在,局部性和利用率间的æƒè¡¡æ›´æ¸…晰了。“cacheâ€å±•ç¤ºå‡ºç›¸æ¯”
+“systemâ€2%的带宽æŸå¤±ï¼Œè€Œâ€œcache (strict)â€è·Œåˆ°20%。
+
+
+结论和建议
+----------
+
+在以上试验中,虽然一致并且也明显,但“cacheâ€äº²å’Œæ€§ä½œç”¨åŸŸ
+相比“systemâ€çš„性能优势并ä¸å¤§ã€‚然而,这影å“是ä¾èµ–于作用域
+é—´è·ç¦»çš„,在更å¤æ‚的处ç†å™¨æ‹“扑下å¯èƒ½æœ‰æ›´æ˜Žæ˜¾çš„å½±å“。
+
+虽然这些情形下缺少工作ä¿æŒæ˜¯æœ‰å处的,但比“cache (strict)â€
+好多了,而且最大化工作队列利用率的需求也并ä¸å¸¸è§ã€‚因此,
+“cacheâ€æ˜¯éžç»‘定池的默认亲和性作用域。
+
+* 由于ä¸å­˜åœ¨ä¸€ä¸ªé€‚用于大多数场景的选择,对于å¯èƒ½éœ€è¦æ¶ˆè€—
+ 大é‡CPU的工作队列,建议通过 ``apply_workqueue_attrs()``
+ 进行(专门)é…置,并考虑是å¦å¯ç”¨ ``WQ_SYSFS``。
+
+* 设置了严格“cpuâ€äº²å’Œæ€§ä½œç”¨åŸŸçš„éžç»‘定工作队列,它的行为与
+ ``WQ_CPU_INTENSIVE`` æ¯CPU工作队列一样。åŽè€…没有真正
+ 优势,而å‰è€…æ供了大幅度的çµæ´»æ€§ã€‚
+
+* 亲和性作用域是从Linux v6.5起引入的。为了模拟旧版行为,
+ å¯ä»¥ä½¿ç”¨ä¸¥æ ¼çš„“numaâ€äº²å’Œæ€§ä½œç”¨åŸŸã€‚
+
+* ä¸ä¸¥æ ¼çš„亲和性作用域中,缺少工作ä¿æŒå¤§æ¦‚缘于调度器。内核
+ 为什么没能维护好大多数场景下的工作ä¿æŒï¼ŒæŠŠäº‹æƒ…作对,还没有
+ ç†è®ºä¸Šçš„解释。因此,未æ¥è°ƒåº¦å™¨çš„改进å¯èƒ½ä¼šä½¿æˆ‘们ä¸å†éœ€è¦
+ 这些调节项。
+
+
+检查é…ç½®
+========
+
+使用 tools/workqueue/wq_dump.py(drgn脚本) æ¥æ£€æŸ¥æœª
+绑定CPU的亲和性é…置,工作者池,以åŠå·¥ä½œé˜Ÿåˆ—如何映射到池上: ::
+
+ $ tools/workqueue/wq_dump.py
+ Affinity Scopes
+ ===============
+ wq_unbound_cpumask=0000000f
+
+ CPU
+ nr_pods 4
+ pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+ pod_node [0]=0 [1]=0 [2]=1 [3]=1
+ cpu_pod [0]=0 [1]=1 [2]=2 [3]=3
+
+ SMT
+ nr_pods 4
+ pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+ pod_node [0]=0 [1]=0 [2]=1 [3]=1
+ cpu_pod [0]=0 [1]=1 [2]=2 [3]=3
+
+ CACHE (default)
+ nr_pods 2
+ pod_cpus [0]=00000003 [1]=0000000c
+ pod_node [0]=0 [1]=1
+ cpu_pod [0]=0 [1]=0 [2]=1 [3]=1
+
+ NUMA
+ nr_pods 2
+ pod_cpus [0]=00000003 [1]=0000000c
+ pod_node [0]=0 [1]=1
+ cpu_pod [0]=0 [1]=0 [2]=1 [3]=1
+
+ SYSTEM
+ nr_pods 1
+ pod_cpus [0]=0000000f
+ pod_node [0]=-1
+ cpu_pod [0]=0 [1]=0 [2]=0 [3]=0
+
+ Worker Pools
+ ============
+ pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
+ pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
+ pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
+ pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
+ pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
+ pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
+ pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
+ pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
+ pool[08] ref=42 nice= 0 idle/workers= 6/ 6 cpus=0000000f
+ pool[09] ref=28 nice= 0 idle/workers= 3/ 3 cpus=00000003
+ pool[10] ref=28 nice= 0 idle/workers= 17/ 17 cpus=0000000c
+ pool[11] ref= 1 nice=-20 idle/workers= 1/ 1 cpus=0000000f
+ pool[12] ref= 2 nice=-20 idle/workers= 1/ 1 cpus=00000003
+ pool[13] ref= 2 nice=-20 idle/workers= 1/ 1 cpus=0000000c
+
+ Workqueue CPU -> pool
+ =====================
+ [ workqueue \ CPU 0 1 2 3 dfl]
+ events percpu 0 2 4 6
+ events_highpri percpu 1 3 5 7
+ events_long percpu 0 2 4 6
+ events_unbound unbound 9 9 10 10 8
+ events_freezable percpu 0 2 4 6
+ events_power_efficient percpu 0 2 4 6
+ events_freezable_power_ percpu 0 2 4 6
+ rcu_gp percpu 0 2 4 6
+ rcu_par_gp percpu 0 2 4 6
+ slub_flushwq percpu 0 2 4 6
+ netns ordered 8 8 8 8 8
+ ...
+
+å‚è§å‘½ä»¤çš„帮助消æ¯ä»¥èŽ·å–更多信æ¯ã€‚
+
+
+监视
+====
+
+使用 tools/workqueue/wq_monitor.py æ¥ç›‘视工作队列的è¿è¡Œï¼š ::
+
+ $ tools/workqueue/wq_monitor.py events
+ total infl CPUtime CPUhog CMW/RPR mayday rescued
+ events 18545 0 6.1 0 5 - -
+ events_highpri 8 0 0.0 0 0 - -
+ events_long 3 0 0.0 0 0 - -
+ events_unbound 38306 0 0.1 - 7 - -
+ events_freezable 0 0 0.0 0 0 - -
+ events_power_efficient 29598 0 0.2 0 0 - -
+ events_freezable_power_ 10 0 0.0 0 0 - -
+ sock_diag_events 0 0 0.0 0 0 - -
+
+ total infl CPUtime CPUhog CMW/RPR mayday rescued
+ events 18548 0 6.1 0 5 - -
+ events_highpri 8 0 0.0 0 0 - -
+ events_long 3 0 0.0 0 0 - -
+ events_unbound 38322 0 0.1 - 7 - -
+ events_freezable 0 0 0.0 0 0 - -
+ events_power_efficient 29603 0 0.2 0 0 - -
+ events_freezable_power_ 10 0 0.0 0 0 - -
+ sock_diag_events 0 0 0.0 0 0 - -
+
+ ...
+
+å‚è§å‘½ä»¤çš„帮助消æ¯ä»¥èŽ·å–更多信æ¯ã€‚
+
+
调试
====
@@ -330,7 +675,6 @@ And with cmwq with ``@max_active`` >= 3, ::
工作队列ä¿è¯ï¼Œå¦‚果在工作项排队åŽæ»¡è¶³ä»¥ä¸‹æ¡ä»¶ï¼Œåˆ™å·¥ä½œé¡¹ä¸èƒ½é‡å…¥ï¼š
-
1. 工作函数没有被改å˜ã€‚
2. 没有人将该工作项排到å¦ä¸€ä¸ªå·¥ä½œé˜Ÿåˆ—中。
3. 该工作项尚未被é‡æ–°å¯åŠ¨ã€‚
diff --git a/MAINTAINERS b/MAINTAINERS
index f6dc90559341..ec0284125e8f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -553,7 +553,7 @@ F: Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
F: drivers/input/misc/adxl34x.c
ADXL355 THREE-AXIS DIGITAL ACCELEROMETER DRIVER
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: linux-iio@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml
@@ -3714,7 +3714,7 @@ F: drivers/iio/imu/bmi323/
BPF JIT for ARM
M: Russell King <linux@armlinux.org.uk>
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: arch/arm/net/
@@ -3764,6 +3764,8 @@ X: arch/riscv/net/bpf_jit_comp64.c
BPF JIT for RISC-V (64-bit)
M: Björn Töpel <bjorn@kernel.org>
+R: Pu Lehui <pulehui@huawei.com>
+R: Puranjay Mohan <puranjay@kernel.org>
L: bpf@vger.kernel.org
S: Maintained
F: arch/riscv/net/
@@ -4191,7 +4193,6 @@ S: Supported
F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <aelior@marvell.com>
M: Sudarsana Kalluru <skalluru@marvell.com>
M: Manish Chopra <manishc@marvell.com>
L: netdev@vger.kernel.org
@@ -15160,9 +15161,8 @@ F: drivers/scsi/myrb.*
F: drivers/scsi/myrs.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Chris Lee <christopher.lee@cspi.com>
L: netdev@vger.kernel.org
-S: Supported
+S: Orphan
W: https://www.cspi.com/ethernet-products/support/downloads/
F: drivers/net/ethernet/myricom/myri10ge/
@@ -17990,7 +17990,6 @@ S: Supported
F: drivers/scsi/qedi/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Ariel Elior <aelior@marvell.com>
M: Manish Chopra <manishc@marvell.com>
L: netdev@vger.kernel.org
S: Supported
@@ -18000,7 +17999,6 @@ F: include/linux/qed/
QLOGIC QL4xxx RDMA DRIVER
M: Michal Kalderon <mkalderon@marvell.com>
-M: Ariel Elior <aelior@marvell.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qedr/
@@ -21918,7 +21916,7 @@ F: include/linux/soc/ti/ti_sci_inta_msi.h
F: include/linux/soc/ti/ti_sci_protocol.h
TEXAS INSTRUMENTS' TMP117 TEMPERATURE SENSOR DRIVER
-M: Puranjay Mohan <puranjay12@gmail.com>
+M: Puranjay Mohan <puranjay@kernel.org>
L: linux-iio@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
@@ -24459,6 +24457,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
F: Documentation/admin-guide/LSM/Yama.rst
F: security/yama/
+YAML NETLINK (YNL)
+M: Donald Hunter <donald.hunter@gmail.com>
+M: Jakub Kicinski <kuba@kernel.org>
+F: Documentation/netlink/
+F: Documentation/userspace-api/netlink/intro-specs.rst
+F: Documentation/userspace-api/netlink/specs.rst
+F: tools/net/ynl/
+
YEALINK PHONE DRIVER
M: Henk Vergonet <Henk.Vergonet@gmail.com>
L: usbb2k-api-dev@nongnu.org
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 1d672457d02f..72b5cd697f5d 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -871,16 +871,11 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
}
/* dst = src (4 bytes)*/
-static inline void emit_a32_mov_r(const s8 dst, const s8 src, const u8 off,
- struct jit_ctx *ctx) {
+static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) {
const s8 *tmp = bpf2a32[TMP_REG_1];
s8 rt;
rt = arm_bpf_get_reg32(src, tmp[0], ctx);
- if (off && off != 32) {
- emit(ARM_LSL_I(rt, rt, 32 - off), ctx);
- emit(ARM_ASR_I(rt, rt, 32 - off), ctx);
- }
arm_bpf_put_reg32(dst, rt, ctx);
}
@@ -889,15 +884,15 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
const s8 src[],
struct jit_ctx *ctx) {
if (!is64) {
- emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
if (!ctx->prog->aux->verifier_zext)
/* Zero out high 4 bytes */
emit_a32_mov_i(dst_hi, 0, ctx);
} else if (__LINUX_ARM_ARCH__ < 6 &&
ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
/* complete 8 byte move */
- emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
- emit_a32_mov_r(dst_hi, src_hi, 0, ctx);
+ emit_a32_mov_r(dst_lo, src_lo, ctx);
+ emit_a32_mov_r(dst_hi, src_hi, ctx);
} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
const u8 *tmp = bpf2a32[TMP_REG_1];
@@ -917,17 +912,52 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[],
struct jit_ctx *ctx) {
const s8 *tmp = bpf2a32[TMP_REG_1];
- const s8 *rt;
+ s8 rs;
+ s8 rd;
- rt = arm_bpf_get_reg64(dst, tmp, ctx);
+ if (is_stacked(dst_lo))
+ rd = tmp[1];
+ else
+ rd = dst_lo;
+ rs = arm_bpf_get_reg32(src_lo, rd, ctx);
+ /* rs may be one of src[1], dst[1], or tmp[1] */
+
+ /* Sign extend rs if needed. If off == 32, lower 32-bits of src are moved to dst and sign
+ * extension only happens in the upper 64 bits.
+ */
+ if (off != 32) {
+ /* Sign extend rs into rd */
+ emit(ARM_LSL_I(rd, rs, 32 - off), ctx);
+ emit(ARM_ASR_I(rd, rd, 32 - off), ctx);
+ } else {
+ rd = rs;
+ }
+
+ /* Write rd to dst_lo
+ *
+ * Optimization:
+ * Assume:
+ * 1. dst == src and stacked.
+ * 2. off == 32
+ *
+ * In this case src_lo was loaded into rd(tmp[1]) but rd was not sign extended as off==32.
+ * So, we don't need to write rd back to dst_lo as they have the same value.
+ * This saves us one str instruction.
+ */
+ if (dst_lo != src_lo || off != 32)
+ arm_bpf_put_reg32(dst_lo, rd, ctx);
- emit_a32_mov_r(dst_lo, src_lo, off, ctx);
if (!is64) {
if (!ctx->prog->aux->verifier_zext)
/* Zero out high 4 bytes */
emit_a32_mov_i(dst_hi, 0, ctx);
} else {
- emit(ARM_ASR_I(rt[0], rt[1], 31), ctx);
+ if (is_stacked(dst_hi)) {
+ emit(ARM_ASR_I(tmp[0], rd, 31), ctx);
+ arm_bpf_put_reg32(dst_hi, tmp[0], ctx);
+ } else {
+ emit(ARM_ASR_I(dst_hi, rd, 31), ctx);
+ }
}
}
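
[Editor's note] The rewritten emit_a32_movsx_r64() sign-extends the low `off` bits (8 or 16) by shifting them up to bit 31 and arithmetic-shifting them back down, which is what the ARM_LSL_I/ARM_ASR_I pair above encodes. A minimal C sketch of the same arithmetic; the helper name and test values are illustrative, not from the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend the low `off` bits of `v` (off = 8 or 16): shift left so the
     * sign bit lands in bit 31, then arithmetic-shift it back down, matching
     * the ARM_LSL_I/ARM_ASR_I pair with a (32 - off) shift count.
     */
    static int32_t sign_extend32(uint32_t v, unsigned int off)
    {
        return (int32_t)(v << (32 - off)) >> (32 - off);
    }

    int main(void)
    {
        printf("%d\n", sign_extend32(0x80u, 8));    /* prints -128 */
        printf("%d\n", sign_extend32(0x8000u, 16)); /* prints -32768 */
        return 0;
    }
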
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index f48b8dab8b3d..1d26bb5b02f4 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -338,12 +338,12 @@ int kvm_register_vgic_device(unsigned long type)
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
struct vgic_reg_attr *reg_attr)
{
- int cpuid;
+ int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
- cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
-
- reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+ reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
+ if (!reg_attr->vcpu)
+ return -EINVAL;
return 0;
}
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 122021f9bdfc..13eac43c632d 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1844,15 +1844,15 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
emit_call(enter_prog, ctx);
+ /* save return value to callee saved register x20 */
+ emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
+
/* if (__bpf_prog_enter(prog) == 0)
* goto skip_exec_of_prog;
*/
branch = ctx->image + ctx->idx;
emit(A64_NOP, ctx);
- /* save return value to callee saved register x20 */
- emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
-
emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
if (!p->jited)
emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 1adf2f39ce59..ec9d692838fc 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -722,6 +722,9 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
if (ret)
return ret;
+ /* store prog start time */
+ emit_mv(RV_REG_S1, RV_REG_A0, ctx);
+
/* if (__bpf_prog_enter(prog) == 0)
* goto skip_exec_of_prog;
*/
@@ -729,9 +732,6 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
/* nop reserved for conditional jump */
emit(rv_nop(), ctx);
- /* store prog start time */
- emit_mv(RV_REG_S1, RV_REG_A0, ctx);
-
/* arg1: &args_off */
emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
if (!p->jited)
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 99f7e1f2b70a..99ea3f12c5d2 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -125,8 +125,19 @@ struct s390_pxts_ctx {
static inline int __paes_keyblob2pkey(struct key_blob *kb,
struct pkey_protkey *pk)
{
- return pkey_keyblob2pkey(kb->key, kb->keylen,
- pk->protkey, &pk->len, &pk->type);
+ int i, ret = -EIO;
+
+ /* try three times in case of busy card */
+ for (i = 0; ret && i < 3; i++) {
+ if (ret == -EBUSY && in_task()) {
+ if (msleep_interruptible(1000))
+ return -EINTR;
+ }
+ ret = pkey_keyblob2pkey(kb->key, kb->keylen,
+ pk->protkey, &pk->len, &pk->type);
+ }
+
+ return ret;
}
static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h
index 4f21ae561e4d..390906b8e386 100644
--- a/arch/s390/include/asm/dwarf.h
+++ b/arch/s390/include/asm/dwarf.h
@@ -9,6 +9,7 @@
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_RESTORE .cfi_restore
+#define CFI_REL_OFFSET .cfi_rel_offset
#ifdef CONFIG_AS_CFI_VAL_OFFSET
#define CFI_VAL_OFFSET .cfi_val_offset
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index 57f62596e53b..85247ef5a41b 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -24,8 +24,10 @@ __kernel_\func:
CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
stg %r14,STACK_FRAME_OVERHEAD(%r15)
+ CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
brasl %r14,__s390_vdso_\func
lg %r14,STACK_FRAME_OVERHEAD(%r15)
+ CFI_RESTORE 14
aghi %r15,WRAPPER_FRAME_SIZE
CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
CFI_RESTORE 15
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 094b43b121cd..12d22a7fa32f 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2661,7 +2661,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
return 0;
start = pmd_val(*pmd) & HPAGE_MASK;
- end = start + HPAGE_SIZE - 1;
+ end = start + HPAGE_SIZE;
__storage_key_init_range(start, end);
set_bit(PG_arch_1, &page->flags);
cond_resched();
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index c2e8242bd15d..dc3db86e13ff 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -139,7 +139,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
}
if (!test_and_set_bit(PG_arch_1, &page->flags))
- __storage_key_init_range(paddr, paddr + size - 1);
+ __storage_key_init_range(paddr, paddr + size);
}
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index df5fac428408..59cbc94b6e69 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1807,36 +1807,41 @@ populate_extable:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
/* Conservatively check that src_reg + insn->off is a kernel address:
- * src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
- * src_reg is used as scratch for src_reg += insn->off and restored
- * after emit_ldx if necessary
+ * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
+ * and
+ * src_reg + insn->off < VSYSCALL_ADDR
*/
- u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
+ u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
u8 *end_of_jmp;
- /* At end of these emitted checks, insn->off will have been added
- * to src_reg, so no need to do relative load with insn->off offset
- */
- insn_off = 0;
+ /* movabsq r10, VSYSCALL_ADDR */
+ emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
+ (u32)(long)VSYSCALL_ADDR);
- /* movabsq r11, limit */
- EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
- EMIT((u32)limit, 4);
- EMIT(limit >> 32, 4);
+ /* mov src_reg, r11 */
+ EMIT_mov(AUX_REG, src_reg);
if (insn->off) {
- /* add src_reg, insn->off */
- maybe_emit_1mod(&prog, src_reg, true);
- EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
+ /* add r11, insn->off */
+ maybe_emit_1mod(&prog, AUX_REG, true);
+ EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
}
- /* cmp src_reg, r11 */
- maybe_emit_mod(&prog, src_reg, AUX_REG, true);
- EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+ /* sub r11, r10 */
+ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
+ EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
+
+ /* movabsq r10, limit */
+ emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
+ (u32)(long)limit);
+
+ /* cmp r10, r11 */
+ maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
+ EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
- /* if unsigned '>=', goto load */
- EMIT2(X86_JAE, 0);
+ /* if unsigned '>', goto load */
+ EMIT2(X86_JA, 0);
end_of_jmp = prog;
/* xor dst_reg, dst_reg */
@@ -1862,18 +1867,6 @@ populate_extable:
/* populate jmp_offset for JMP above */
start_of_ldx[-1] = prog - start_of_ldx;
- if (insn->off && src_reg != dst_reg) {
- /* sub src_reg, insn->off
- * Restore src_reg after "add src_reg, insn->off" in prev
- * if statement. But if src_reg == dst_reg, emit_ldx
- * above already clobbered src_reg, so no need to restore.
- * If add src_reg, insn->off was unnecessary, no need to
- * restore either.
- */
- maybe_emit_1mod(&prog, src_reg, true);
- EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
- }
-
if (!bpf_prog->aux->extable)
break;
@@ -3473,3 +3466,9 @@ bool bpf_jit_supports_ptr_xchg(void)
{
return true;
}
+
+/* x86-64 JIT emits its own code to filter user addresses so return 0 here */
+u64 bpf_arch_uaddress_limit(void)
+{
+ return 0;
+}
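
[Editor's note] The new guard replaces the single lower-bound compare with a two-sided check, TASK_SIZE_MAX + PAGE_SIZE < addr < VSYSCALL_ADDR, folded into one unsigned comparison by subtracting VSYSCALL_ADDR from both sides and letting the values wrap. A rough C model of that trick; the constants are the usual x86-64 values but are shown here only to make the arithmetic concrete:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_TASK_SIZE_MAX  0x00007ffffffff000ULL
    #define DEMO_PAGE_SIZE      0x1000ULL
    #define DEMO_VSYSCALL_ADDR  0xffffffffff600000ULL

    /* One unsigned comparison decides "lo < addr < hi": after subtracting hi,
     * addresses inside (lo, hi) wrap to values strictly greater than lo - hi,
     * while addresses <= lo or >= hi do not.
     */
    static int demo_probe_addr_ok(uint64_t addr)
    {
        uint64_t lo = DEMO_TASK_SIZE_MAX + DEMO_PAGE_SIZE;
        uint64_t hi = DEMO_VSYSCALL_ADDR;

        return (addr - hi) > (lo - hi);
    }

    int main(void)
    {
        printf("%d\n", demo_probe_addr_ok(0xffff888000000000ULL)); /* 1: kernel address */
        printf("%d\n", demo_probe_addr_ok(0x00007f0000000000ULL)); /* 0: user address */
        printf("%d\n", demo_probe_addr_ok(DEMO_VSYSCALL_ADDR));    /* 0: vsyscall page excluded */
        return 0;
    }
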
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 38bcecb0e457..a2b6bb5429f5 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -100,6 +100,10 @@ void flush_cache_range(struct vm_area_struct*, ulong, ulong);
void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_page(struct vm_area_struct*,
unsigned long, unsigned long);
+#define flush_cache_all flush_cache_all
+#define flush_cache_range flush_cache_range
+#define flush_icache_range flush_icache_range
+#define flush_cache_page flush_cache_page
#else
#define flush_cache_all local_flush_cache_all
#define flush_cache_range local_flush_cache_range
@@ -136,20 +140,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#else
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-
-#define flush_cache_vmap(start,end) do { } while (0)
-#define flush_cache_vmap_early(start,end) do { } while (0)
-#define flush_cache_vunmap(start,end) do { } while (0)
-
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-
#define flush_icache_range local_flush_icache_range
-#define flush_cache_page(vma, addr, pfn) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
#endif
@@ -162,15 +153,14 @@ void local_flush_cache_page(struct vm_area_struct *vma,
__invalidate_icache_range(start,(end) - (start)); \
} while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void copy_to_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
+#define copy_to_user_page copy_to_user_page
+#define copy_from_user_page copy_from_user_page
#else
@@ -186,4 +176,6 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
#endif
+#include <asm-generic/cacheflush.h>
+
#endif /* _XTENSA_CACHEFLUSH_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index d008a153a2b9..7ed1a2085bd7 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -115,9 +115,9 @@
#define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30)
/* Convert return address to a valid pc
- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ * Note: 'text' is the address within the same 1GB range as the ra
*/
-#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
+#define MAKE_PC_FROM_RA(ra, text) (((ra) & 0x3fffffff) | ((unsigned long)(text) & 0xc0000000))
#elif defined(__XTENSA_CALL0_ABI__)
@@ -127,9 +127,9 @@
#define MAKE_RA_FOR_CALL(ra, ws) (ra)
/* Convert return address to a valid pc
- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ * Note: 'text' is not used as 'ra' is always the full address
*/
-#define MAKE_PC_FROM_RA(ra, sp) (ra)
+#define MAKE_PC_FROM_RA(ra, text) (ra)
#else
#error Unsupported Xtensa ABI
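
[Editor's note] With the windowed ABI the top two bits of the return address carry the caller's window size (see MAKE_RA_FOR_CALL above), so MAKE_PC_FROM_RA() keeps only the low 30 bits and takes the 1 GB region bits from a known kernel-text address rather than assuming the stack pointer shares that region. A small sketch of the macro's arithmetic with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the windowed-ABI MAKE_PC_FROM_RA(): low 30 bits come
     * from the return address, the 1 GB region bits from a text address.
     */
    #define DEMO_MAKE_PC_FROM_RA(ra, text) \
        (((ra) & 0x3fffffffu) | ((uint32_t)(text) & 0xc0000000u))

    int main(void)
    {
        uint32_t ra   = 0x80001234u;  /* top two bits encode the window size */
        uint32_t text = 0xd0000300u;  /* any address inside the kernel text's 1 GB region */

        printf("pc = 0x%08x\n", DEMO_MAKE_PC_FROM_RA(ra, text)); /* prints 0xc0001234 */
        return 0;
    }
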
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index a270467556dc..86c70117371b 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -87,7 +87,7 @@ struct pt_regs {
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
# define instruction_pointer(regs) ((regs)->pc)
# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
- (regs)->areg[1]))
+ (regs)->pc))
# ifndef CONFIG_SMP
# define profile_pc(regs) instruction_pointer(regs)
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index a815577d25fd..7bd66677f7b6 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -47,6 +47,7 @@
#include <asm/asm-offsets.h>
#include <asm/regs.h>
#include <asm/hw_breakpoint.h>
+#include <asm/sections.h>
#include <asm/traps.h>
extern void ret_from_fork(void);
@@ -380,7 +381,7 @@ unsigned long __get_wchan(struct task_struct *p)
int count = 0;
sp = p->thread.sp;
- pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
+ pc = MAKE_PC_FROM_RA(p->thread.ra, _text);
do {
if (sp < stack_page + sizeof(struct task_struct) ||
@@ -392,7 +393,7 @@ unsigned long __get_wchan(struct task_struct *p)
/* Stack layout: sp-4: ra, sp-3: sp' */
- pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), _text);
sp = SPILL_SLOT(sp, 1);
} while (count++ < 16);
return 0;
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index 831ffb648bda..ed324fdf2a2f 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -13,6 +13,7 @@
#include <linux/stacktrace.h>
#include <asm/ftrace.h>
+#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
@@ -189,7 +190,7 @@ void walk_stackframe(unsigned long *sp,
if (a1 <= (unsigned long)sp)
break;
- frame.pc = MAKE_PC_FROM_RA(a0, a1);
+ frame.pc = MAKE_PC_FROM_RA(a0, _text);
frame.sp = a1;
if (fn(&frame, data))
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 8896e691c051..abec44b687df 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -166,10 +166,8 @@ late_initcall(rs_init);
static void iss_console_write(struct console *co, const char *s, unsigned count)
{
- if (s && *s != 0) {
- int len = strlen(s);
- simc_write(1, s, count < len ? count : len);
- }
+ if (s && *s != 0)
+ simc_write(1, s, min(count, strlen(s)));
}
static struct tty_driver* iss_console_device(struct console *c, int *index)
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index b0d671db178a..ea31ac7ac1ca 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -148,10 +148,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length)
if (atomic_read(&buffer->size) == 0)
return -ENODEV;
- /* FIXME: Check length <= user_length. */
+ length = buffer->head->length;
+
+ if (length > user_length)
+ return 0;
end = buffer->data + buffer->capacity;
- length = buffer->head->length;
if (&buffer->head->data[length] < end) {
if (copy_to_user(data, buffer->head->data, length))
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 38d19410a2be..b9ae0340b8a7 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -1556,6 +1556,8 @@ static int handle_at_packet(struct context *context,
#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
+static u32 get_cycle_time(struct fw_ohci *ohci);
+
static void handle_local_rom(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
{
@@ -1580,6 +1582,8 @@ static void handle_local_rom(struct fw_ohci *ohci,
(void *) ohci->config_rom + i, length);
}
+ // Timestamping on behalf of the hardware.
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
@@ -1628,6 +1632,8 @@ static void handle_local_lock(struct fw_ohci *ohci,
fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
out:
+ // Timestamping on behalf of the hardware.
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
fw_core_handle_response(&ohci->card, &response);
}
@@ -1670,8 +1676,6 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
}
}
-static u32 get_cycle_time(struct fw_ohci *ohci);
-
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
unsigned long flags;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 59b5dd0e2f41..14daf432f30b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -5705,7 +5705,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6141",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_ports = 6,
.num_internal_phys = 5,
@@ -6164,7 +6164,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
.family = MV88E6XXX_FAMILY_6341,
.name = "Marvell 88E6341",
- .num_databases = 4096,
+ .num_databases = 256,
.num_macs = 2048,
.num_internal_phys = 5,
.num_ports = 6,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b1f84b37032a..c7e7dac057a3 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet: " fmt
@@ -2467,14 +2467,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
u32 reg;
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (reg & CMD_SW_RESET)
+ if (reg & CMD_SW_RESET) {
+ spin_unlock_bh(&priv->reg_lock);
return;
+ }
if (enable)
reg |= mask;
else
reg &= ~mask;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
/* UniMAC stops on a packet boundary, wait for a full-size packet
* to be processed
@@ -2490,8 +2494,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
udelay(10);
/* issue soft reset and disable MAC while updating its registers */
+ spin_lock_bh(&priv->reg_lock);
bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
udelay(2);
+ spin_unlock_bh(&priv->reg_lock);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -3334,7 +3340,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
/* Start the network engine */
+ netif_addr_lock_bh(dev);
bcmgenet_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
bcmgenet_enable_rx_napi(priv);
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3595,16 +3603,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
* 3. The number of filters needed exceeds the number filters
* supported by the hardware.
*/
+ spin_lock(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
(nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
return;
} else {
reg &= ~CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock(&priv->reg_lock);
}
/* update MDF filter */
@@ -4003,6 +4014,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
/* Set default pause parameters */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7523b60b3c1c..43b923c48b14 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#ifndef __BCMGENET_H__
@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
/* device context */
struct bcmgenet_priv {
void __iomem *base;
+ /* reg_lock: lock to serialize access to shared registers */
+ spinlock_t reg_lock;
enum bcmgenet_version version;
struct net_device *dev;
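
[Editor's note] The new reg_lock exists because UMAC_CMD is updated with read-modify-write sequences from several paths (reset, rx-mode updates, WoL setup, link reconfiguration), and two unlocked writers can silently undo each other's bit changes. A minimal sketch of the locked pattern, using a hypothetical device context and register offset rather than the real bcmgenet accessors:

    #include <linux/io.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_priv {
        spinlock_t reg_lock;    /* serializes read-modify-write of shared regs */
        void __iomem *base;
    };

    #define DEMO_UMAC_CMD  0x008

    static void demo_umac_set_bits(struct demo_priv *priv, u32 mask, bool enable)
    {
        u32 reg;

        spin_lock_bh(&priv->reg_lock);
        reg = readl(priv->base + DEMO_UMAC_CMD);
        if (enable)
            reg |= mask;
        else
            reg &= ~mask;
        writel(reg, priv->base + DEMO_UMAC_CMD);
        spin_unlock_bh(&priv->reg_lock);
    }
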
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 7a41cad5788f..1248792d7fd4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#define pr_fmt(fmt) "bcmgenet_wol: " fmt
@@ -151,6 +151,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Can't suspend with WoL if MAC is still in reset */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
if (reg & CMD_SW_RESET)
reg &= ~CMD_SW_RESET;
@@ -158,6 +159,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* disable RX */
reg &= ~CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
mdelay(10);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
@@ -203,6 +205,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
}
/* Enable CRC forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
priv->crc_fwd_en = 1;
reg |= CMD_CRC_FWD;
@@ -210,6 +213,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
/* Receiver must be enabled for WOL MP detection */
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
@@ -256,7 +260,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
/* Disable CRC Forward */
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ada89355747..c4a3698cef66 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -2,7 +2,7 @@
/*
* Broadcom GENET MDIO routines
*
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
*/
#include <linux/acpi.h>
@@ -76,6 +76,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ spin_lock_bh(&priv->reg_lock);
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
CMD_HD_EN |
@@ -88,6 +89,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
reg |= CMD_TX_EN | CMD_RX_EN;
}
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ spin_unlock_bh(&priv->reg_lock);
active = phy_init_eee(phydev, 0) >= 0;
bcmgenet_eee_enable_set(dev,
@@ -275,6 +277,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* block for the interface to work, unconditionally clear the
* Out-of-band disable since we do not need it.
*/
+ mutex_lock(&phydev->lock);
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
@@ -286,6 +289,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
reg |= RGMII_MODE_EN;
}
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ mutex_unlock(&phydev->lock);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7246e13dd559..97291bfbeea5 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
void *kern_buf;
/* Copy the user space buf */
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
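
[Editor's note] This and the later ice/octeontx2 debugfs hunks switch from memdup_user() to memdup_user_nul(), which appends a terminating NUL so the copied buffer can be handed to string parsers without reading past its end. A small sketch of a write handler built on that call; the handler name and the kstrtouint() parsing are illustrative, not from this patch:

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static ssize_t demo_debugfs_write(struct file *filp, const char __user *buf,
                                      size_t count, loff_t *ppos)
    {
        unsigned int val;
        char *kern_buf;
        int ret;

        /* memdup_user_nul() copies count bytes and appends a '\0'. */
        kern_buf = memdup_user_nul(buf, count);
        if (IS_ERR(kern_buf))
            return PTR_ERR(kern_buf);

        ret = kstrtouint(strim(kern_buf), 0, &val);
        kfree(kern_buf);
        if (ret)
            return ret;

        /* ... apply val to the device here ... */
        return count;
    }
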
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 49d5808b7d11..de52bcb884c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2670,12 +2670,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
lb->loopback = 1;
q = &adap->sge.ethtxq[pi->first_qset];
- __netif_tx_lock(q->txq, smp_processor_id());
+ __netif_tx_lock_bh(q->txq);
reclaim_completed_tx(adap, &q->q, -1, true);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
return -ENOMEM;
}
@@ -2710,7 +2710,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
init_completion(&lb->completion);
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- __netif_tx_unlock(q->txq);
+ __netif_tx_unlock_bh(q->txq);
/* wait for the pkt to return */
ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 93544f1cc2a5..f7ae0e0aa4a4 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -157,7 +157,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -181,7 +181,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success) {
*data = (u16)mdic;
@@ -237,7 +237,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
- usleep_range(50, 60);
+ udelay(50);
mdic = er32(MDIC);
if (mdic & E1000_MDIC_READY)
break;
@@ -261,7 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
- usleep_range(100, 150);
+ udelay(100);
if (success)
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index d252d98218d0..9fc0fd95a13d 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -171,7 +171,7 @@ ice_debugfs_module_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 8)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -257,7 +257,7 @@ ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 4)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -332,7 +332,7 @@ ice_debugfs_enable_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 2)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
@@ -428,7 +428,7 @@ ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
if (*ppos != 0 || count > 5)
return -EINVAL;
- cmd_buf = memdup_user(buf, count);
+ cmd_buf = memdup_user_nul(buf, count);
if (IS_ERR(cmd_buf))
return PTR_ERR(cmd_buf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 2500f5ba4f5a..881d704644fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
u16 pcifunc;
int ret, lf;
- cmd_buf = memdup_user(buffer, count + 1);
+ cmd_buf = memdup_user_nul(buffer, count);
if (IS_ERR(cmd_buf))
return -ENOMEM;
- cmd_buf[count] = '\0';
-
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a5ac21a0ee33..cb6b33a228ea 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f)
{
struct qede_arfs_fltr_node *n;
- int min_hlen, rc = -EINVAL;
struct qede_arfs_tuple t;
+ int min_hlen, rc;
__qede_lock(edev);
@@ -1879,7 +1879,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- if (qede_parse_flow_attr(edev, proto, f->rule, &t))
+ rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
+ if (rc)
goto unlock;
/* Validate profile mode and number of filters */
@@ -1888,11 +1889,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
DP_NOTICE(edev,
"Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ rc = -EINVAL;
goto unlock;
}
/* parse tc actions and get the vf_id */
- if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
+ rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
+ if (rc)
goto unlock;
if (qede_flow_find_fltr(edev, &t)) {
@@ -1998,10 +2001,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
- err = -EINVAL;
+ err = qede_parse_flow_attr(edev, proto, flow->rule, t);
+ if (err)
goto err_out;
- }
/* Make sure location is valid and filter isn't already set */
err = qede_flow_spec_validate(edev, &flow->rule->action, t,
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index ba319fc21957..3a9148fb1422 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1674,6 +1674,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
bool raw_proto = false;
void *oiph;
__be32 vni = 0;
+ int nh;
/* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1762,12 +1763,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(vxlan->dev, rx_length_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
+ vxlan_vnifilter_count(vxlan, vni, vninode,
+ VXLAN_VNI_STATS_RX_ERRORS, 0);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
- ++vxlan->dev->stats.rx_frame_errors;
- ++vxlan->dev->stats.rx_errors;
+ DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+ DEV_STATS_INC(vxlan->dev, rx_errors);
vxlan_vnifilter_count(vxlan, vni, vninode,
VXLAN_VNI_STATS_RX_ERRORS, 0);
goto drop;
@@ -1837,7 +1854,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
+ vxlan_vnifilter_count(vxlan, vni, NULL,
+ VXLAN_VNI_STATS_TX_DROPS, 0);
goto out;
}
parp = arp_hdr(skb);
@@ -1893,7 +1912,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
reply->pkt_type = PACKET_HOST;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2052,7 +2071,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
goto out;
if (netif_rx(reply) == NET_RX_DROP) {
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2263,7 +2282,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
len);
} else {
drop:
- dev->stats.rx_dropped++;
+ dev_core_stats_rx_dropped_inc(dev);
vxlan_vnifilter_count(dst_vxlan, vni, NULL,
VXLAN_VNI_STATS_RX_DROPS, 0);
}
@@ -2295,7 +2314,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
addr_family, dst_port,
vxlan->cfg.flags);
if (!dst_vxlan) {
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
@@ -2559,7 +2578,7 @@ out_unlock:
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
return;
@@ -2567,11 +2586,11 @@ drop:
tx_error:
rcu_read_unlock();
if (err == -ELOOP)
- dev->stats.collisions++;
+ DEV_STATS_INC(dev, collisions);
else if (err == -ENETUNREACH)
- dev->stats.tx_carrier_errors++;
+ DEV_STATS_INC(dev, tx_carrier_errors);
dst_release(ndst);
- dev->stats.tx_errors++;
+ DEV_STATS_INC(dev, tx_errors);
vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
kfree_skb(skb);
}
@@ -2604,7 +2623,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
return;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2642,7 +2661,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
dev_kfree_skb(skb);
@@ -2739,7 +2758,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
!is_multicast_ether_addr(eth->h_dest))
vxlan_fdb_miss(vxlan, eth->h_dest);
- dev->stats.tx_dropped++;
+ dev_core_stats_tx_dropped_inc(dev);
vxlan_vnifilter_count(vxlan, vni, NULL,
VXLAN_VNI_STATS_TX_DROPS, 0);
kfree_skb(skb);
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
index d376fa7114d1..029efe16f8cc 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
@@ -43,7 +43,7 @@
#define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */
#define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */
#define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */
-#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */
#define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
#define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
#define SCU690 0x690 /* Multi-function Pin Control #24 */
@@ -2495,38 +2495,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
/* GPIOS7 */
- ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+ ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23),
/* GPIOS6 */
- ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+ ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22),
/* GPIOS5 */
- ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+ ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21),
/* GPIOS4 */
- ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+ ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20),
/* GPIOS3*/
- ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+ ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19),
/* GPIOS2 */
- ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+ ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18),
/* GPIOS1 */
- ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+ ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17),
/* GPIOS0 */
- ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+ ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16),
/* GPIOR7 */
- ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+ ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15),
/* GPIOR6 */
- ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+ ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14),
/* GPIOR5 */
- ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+ ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13),
/* GPIOR4 */
- ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+ ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12),
/* GPIOR3*/
- ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+ ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11),
/* GPIOR2 */
- ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+ ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10),
/* GPIOR1 */
- ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+ ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9),
/* GPIOR0 */
- ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+ ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8),
/* GPIOX7 */
ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 6649357637ff..cffeb869130d 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2124,13 +2124,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev)
error = pinctrl_claim_hogs(pctldev);
if (error) {
- dev_err(pctldev->dev, "could not claim hogs: %i\n",
- error);
- pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
- pctldev->desc->npins);
- mutex_destroy(&pctldev->mutex);
- kfree(pctldev);
-
+ dev_err(pctldev->dev, "could not claim hogs: %i\n", error);
return error;
}
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index df1efc2e5202..6a94ecd6a8de 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
for (state = 0; ; state++) {
/* Retrieve the pinctrl-* property */
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
- if (!propname)
- return -ENOMEM;
+ if (!propname) {
+ ret = -ENOMEM;
+ goto err;
+ }
prop = of_find_property(np, propname, &size);
kfree(propname);
if (!prop) {
if (state == 0) {
- of_node_put(np);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err;
}
break;
}
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index ac97724c59ba..4e87f5b875c0 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -231,6 +231,7 @@ static const unsigned int byt_score_pins_map[BYT_NGPIO_SCORE] = {
/* SCORE groups */
static const unsigned int byt_score_uart1_pins[] = { 70, 71, 72, 73 };
static const unsigned int byt_score_uart2_pins[] = { 74, 75, 76, 77 };
+static const unsigned int byt_score_uart3_pins[] = { 57, 61 };
static const unsigned int byt_score_pwm0_pins[] = { 94 };
static const unsigned int byt_score_pwm1_pins[] = { 95 };
@@ -278,37 +279,38 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
static const struct intel_pingroup byt_score_groups[] = {
- PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1),
- PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1),
- PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1),
- PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1),
- PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1),
- PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1),
- PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1),
- PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1),
- PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1),
- PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1),
- PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1),
- PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1),
- PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1),
- PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1),
- PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1),
- PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
- PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1),
- PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1),
- PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1),
- PIN_GROUP("sata_grp", byt_score_sata_pins, 1),
- PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
- PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
- PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
- PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
- PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
- PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
- PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1),
+ PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1),
+ PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1),
+ PIN_GROUP_GPIO("uart3_grp", byt_score_uart3_pins, 1),
+ PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1),
+ PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1),
+ PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1),
+ PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1),
+ PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1),
+ PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1),
+ PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1),
+ PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1),
+ PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1),
+ PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1),
+ PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1),
+ PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1),
+ PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1),
+ PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
+ PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1),
+ PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1),
+ PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1),
+ PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1),
+ PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
+ PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
+ PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
+ PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
+ PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
+ PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
+ PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1),
};
static const char * const byt_score_uart_groups[] = {
- "uart1_grp", "uart2_grp",
+ "uart1_grp", "uart2_grp", "uart3_grp",
};
static const char * const byt_score_pwm_groups[] = {
"pwm0_grp", "pwm1_grp",
@@ -332,12 +334,14 @@ static const char * const byt_score_plt_clk_groups[] = {
};
static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
static const char * const byt_score_gpio_groups[] = {
- "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
- "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
- "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
- "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
- "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
- "plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
+ "uart1_grp_gpio", "uart2_grp_gpio", "uart3_grp_gpio", "pwm0_grp_gpio",
+ "pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio",
+ "sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio",
+ "i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio",
+ "sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio",
+ "sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio",
+ "plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio",
+ "plt_clk5_grp_gpio", "smbus_grp_gpio",
};
static const struct intel_function byt_score_functions[] = {
@@ -456,8 +460,8 @@ static const struct intel_pingroup byt_sus_groups[] = {
PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values),
PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values),
PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values),
- PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
- PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
+ PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
};
static const char * const byt_sus_usb_groups[] = {
@@ -469,7 +473,7 @@ static const char * const byt_sus_pmu_clk_groups[] = {
};
static const char * const byt_sus_gpio_groups[] = {
"usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio",
- "pmu_clk1_grp", "pmu_clk2_grp",
+ "pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio",
};
static const struct intel_function byt_sus_functions[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index fde65e18cd14..6981e2fab93f 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -179,6 +179,10 @@ struct intel_community {
.modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
+#define PIN_GROUP_GPIO(n, p, m) \
+ PIN_GROUP(n, p, m), \
+ PIN_GROUP(n "_gpio", p, 0)
+
#define FUNCTION(n, g) \
{ \
.func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index b6bc31abd2b0..b19bc391705e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -165,20 +165,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
break;
case PIN_CONFIG_INPUT_ENABLE:
- case PIN_CONFIG_OUTPUT_ENABLE:
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret);
+ if (!ret)
+ err = -EINVAL;
+ break;
+ case PIN_CONFIG_OUTPUT:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
if (err)
break;
- /* CONFIG Current direction return value
- * ------------- ----------------- ----------------------
- * OUTPUT_ENABLE output 1 (= HW value)
- * input 0 (= HW value)
- * INPUT_ENABLE output 0 (= reverse HW value)
- * input 1 (= reverse HW value)
- */
- if (param == PIN_CONFIG_INPUT_ENABLE)
- ret = !ret;
+ if (!ret) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
@@ -193,6 +194,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
}
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
+ if (!ret)
+ err = -EINVAL;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
if (!hw->soc->drive_get)
@@ -281,26 +284,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
break;
err = hw->soc->bias_set_combo(hw, desc, 0, arg);
break;
- case PIN_CONFIG_OUTPUT_ENABLE:
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
- MTK_DISABLE);
- /* Keep set direction to consider the case that a GPIO pin
- * does not have SMT control
- */
- if (err != -ENOTSUPP)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_OUTPUT);
- break;
case PIN_CONFIG_INPUT_ENABLE:
/* regard all non-zero value as enable */
err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
- if (err)
- break;
-
- err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
- MTK_INPUT);
break;
case PIN_CONFIG_SLEW_RATE:
/* regard all non-zero value as enable */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c
index 79f5d753d7e1..50a87d9618a8 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-a1.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c
@@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 };
static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 };
static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 };
static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 };
-static const unsigned int pdm_dclk_pins[] = { GPIOA_9 };
+static const unsigned int pdm_dclk_a_pins[] = { GPIOA_9 };
/* gen_clk */
static const unsigned int gen_clk_x_pins[] = { GPIOX_7 };
@@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = {
GROUP(pdm_din2_a, 3),
GROUP(pdm_din1_a, 3),
GROUP(pdm_din0_a, 3),
- GROUP(pdm_dclk, 3),
+ GROUP(pdm_dclk_a, 3),
GROUP(pwm_c_a, 3),
GROUP(pwm_b_a, 3),
@@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = {
static const char * const pdm_groups[] = {
"pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
- "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+ "pdm_din1_a", "pdm_din0_a", "pdm_dclk_a",
};
static const char * const gen_clk_groups[] = {
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index eb5a8c654260..20425afc6b33 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -2045,7 +2045,9 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
for (unsigned int i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
struct irq_data *data;
+ unsigned long flags;
unsigned int virq;
+ int ret;
if (!pctrl->hwirq[i])
continue;
@@ -2063,8 +2065,18 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
continue;
}
- if (!irqd_irq_disabled(data))
+ /*
+ * This has to be atomically executed to protect against a concurrent
+ * interrupt.
+ */
+ raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+ ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
+ if (!ret && !irqd_irq_disabled(data))
rzg2l_gpio_irq_enable(data);
+ raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+
+ if (ret)
+ dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
}
}
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 30951f7131cd..1accdaaf282c 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -721,6 +721,7 @@ static struct miscdevice isst_if_char_driver = {
static const struct x86_cpu_id hpm_cpu_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL),
{}
};
diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c
index 1305cba61edd..aca123783efc 100644
--- a/drivers/power/supply/mt6360_charger.c
+++ b/drivers/power/supply/mt6360_charger.c
@@ -588,7 +588,7 @@ static const struct regulator_ops mt6360_chg_otg_ops = {
};
static const struct regulator_desc mt6360_otg_rdesc = {
- .of_match = "usb-otg-vbus",
+ .of_match = "usb-otg-vbus-regulator",
.name = "usb-otg-vbus",
.ops = &mt6360_chg_otg_ops,
.owner = THIS_MODULE,
diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c
index c345a77f9f78..e4dbacd50a43 100644
--- a/drivers/power/supply/rt9455_charger.c
+++ b/drivers/power/supply/rt9455_charger.c
@@ -192,6 +192,7 @@ static const int rt9455_voreg_values[] = {
4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000
};
+#if IS_ENABLED(CONFIG_USB_PHY)
/*
* When the charger is in boost mode, REG02[7:2] represent boost output
* voltage.
@@ -207,6 +208,7 @@ static const int rt9455_boost_voltage_values[] = {
5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
};
+#endif
/* REG07[3:0] (VMREG) in uV */
static const int rt9455_vmreg_values[] = {
diff --git a/drivers/regulator/irq_helpers.c b/drivers/regulator/irq_helpers.c
index fe7ae0f3f46a..5ab1a0befe12 100644
--- a/drivers/regulator/irq_helpers.c
+++ b/drivers/regulator/irq_helpers.c
@@ -352,6 +352,9 @@ void *regulator_irq_helper(struct device *dev,
h->irq = irq;
h->desc = *d;
+ h->desc.name = devm_kstrdup(dev, d->name, GFP_KERNEL);
+ if (!h->desc.name)
+ return ERR_PTR(-ENOMEM);
ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
rdev_amount);
diff --git a/drivers/regulator/mt6360-regulator.c b/drivers/regulator/mt6360-regulator.c
index ad6587a378d0..24cc9fc94e90 100644
--- a/drivers/regulator/mt6360-regulator.c
+++ b/drivers/regulator/mt6360-regulator.c
@@ -319,15 +319,15 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
}
}
-#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg, vmask, \
- mreg, mmask, streg, stmask, vranges, \
- vcnts, offon_delay, irq_tbls) \
+#define MT6360_REGULATOR_DESC(match, _name, _sname, ereg, emask, vreg, \
+ vmask, mreg, mmask, streg, stmask, \
+ vranges, vcnts, offon_delay, irq_tbls) \
{ \
.desc = { \
.name = #_name, \
.supply_name = #_sname, \
.id = MT6360_REGULATOR_##_name, \
- .of_match = of_match_ptr(#_name), \
+ .of_match = of_match_ptr(match), \
.regulators_node = of_match_ptr("regulator"), \
.of_map_mode = mt6360_regulator_of_map_mode, \
.owner = THIS_MODULE, \
@@ -351,21 +351,29 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
}
static const struct mt6360_regulator_desc mt6360_regulator_descs[] = {
- MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
+ MT6360_REGULATOR_DESC("buck1", BUCK1, BUCK1_VIN,
+ 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
buck_vout_ranges, 256, 0, buck1_irq_tbls),
- MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
+ MT6360_REGULATOR_DESC("buck2", BUCK2, BUCK2_VIN,
+ 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
buck_vout_ranges, 256, 0, buck2_irq_tbls),
- MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
+ MT6360_REGULATOR_DESC("ldo6", LDO6, LDO_VIN3,
+ 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
- MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
+ MT6360_REGULATOR_DESC("ldo7", LDO7, LDO_VIN3,
+ 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
- MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
+ MT6360_REGULATOR_DESC("ldo1", LDO1, LDO_VIN1,
+ 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
- MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
+ MT6360_REGULATOR_DESC("ldo2", LDO2, LDO_VIN1,
+ 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
- MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
+ MT6360_REGULATOR_DESC("ldo3", LDO3, LDO_VIN1,
+ 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
- MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
+ MT6360_REGULATOR_DESC("ldo5", LDO5, LDO_VIN2,
+ 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
};
diff --git a/drivers/regulator/qcom-refgen-regulator.c b/drivers/regulator/qcom-refgen-regulator.c
index 656fe330d38f..063e12c08e75 100644
--- a/drivers/regulator/qcom-refgen-regulator.c
+++ b/drivers/regulator/qcom-refgen-regulator.c
@@ -140,6 +140,7 @@ static const struct of_device_id qcom_refgen_match_table[] = {
{ .compatible = "qcom,sm8250-refgen-regulator", .data = &sm8250_refgen_desc },
{ }
};
+MODULE_DEVICE_TABLE(of, qcom_refgen_match_table);
static struct platform_driver qcom_refgen_driver = {
.probe = qcom_refgen_probe,
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
index 086da36abc0b..4955616517ce 100644
--- a/drivers/regulator/vqmmc-ipq4019-regulator.c
+++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
@@ -84,6 +84,7 @@ static const struct of_device_id regulator_ipq4019_of_match[] = {
{ .compatible = "qcom,vqmmc-ipq4019-regulator", },
{},
};
+MODULE_DEVICE_TABLE(of, regulator_ipq4019_of_match);
static struct platform_driver ipq4019_regulator_driver = {
.probe = ipq4019_regulator_probe,
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 37173cb0f5f5..c57694be9bd3 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -162,7 +162,8 @@ struct raw3270_request *raw3270_request_alloc(size_t size)
/*
* Setup ccw.
*/
- rq->ccw.cda = virt_to_dma32(rq->buffer);
+ if (rq->buffer)
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
return rq;
@@ -188,7 +189,8 @@ int raw3270_request_reset(struct raw3270_request *rq)
return -EBUSY;
rq->ccw.cmd_code = 0;
rq->ccw.count = 0;
- rq->ccw.cda = virt_to_dma32(rq->buffer);
+ if (rq->buffer)
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
rq->rescnt = 0;
rq->rc = 0;
diff --git a/drivers/s390/cio/cio_inject.c b/drivers/s390/cio/cio_inject.c
index 8613fa937237..a2e771ebae8e 100644
--- a/drivers/s390/cio/cio_inject.c
+++ b/drivers/s390/cio/cio_inject.c
@@ -95,7 +95,7 @@ static ssize_t crw_inject_write(struct file *file, const char __user *buf,
return -EINVAL;
}
- buffer = vmemdup_user(buf, lbuf);
+ buffer = memdup_user_nul(buf, lbuf);
if (IS_ERR(buffer))
return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 0a3a678ffc7e..6087547328ce 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -658,7 +658,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
@@ -1263,7 +1263,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
@@ -1426,7 +1426,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index eb7f5489ccf9..9bcf8fc69ebe 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -556,13 +556,29 @@ static int check_reply_pl(const u8 *pl, const char *func)
pl += 2;
ret = *((u32 *)pl);
if (ret != 0) {
- ZCRYPT_DBF_ERR("%s return value 0x%04x != 0\n", func, ret);
+ ZCRYPT_DBF_ERR("%s return value 0x%08x != 0\n", func, ret);
return -EIO;
}
return 0;
}
+/* Check ep11 reply cprb, return 0 or suggested errno value. */
+static int check_reply_cprb(const struct ep11_cprb *rep, const char *func)
+{
+ /* check ep11 reply return code field */
+ if (rep->ret_code) {
+ ZCRYPT_DBF_ERR("%s ep11 reply ret_code=0x%08x\n", __func__,
+ rep->ret_code);
+ if (rep->ret_code == 0x000c0003)
+ return -EBUSY;
+ else
+ return -EIO;
+ }
+
+ return 0;
+}
+
/*
* Helper function which does an ep11 query with given query type.
*/
@@ -627,6 +643,12 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
@@ -877,6 +899,12 @@ static int _ep11_genaeskey(u16 card, u16 domain,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
@@ -1028,6 +1056,12 @@ static int ep11_cryptsingle(u16 card, u16 domain,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
@@ -1185,6 +1219,12 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
@@ -1339,6 +1379,12 @@ static int _ep11_wrapkey(u16 card, u16 domain,
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f0b8b709649f..a3adaec5504e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -364,30 +364,33 @@ out:
return rc;
}
+static void qeth_free_cq(struct qeth_card *card)
+{
+ if (card->qdio.c_q) {
+ qeth_free_qdio_queue(card->qdio.c_q);
+ card->qdio.c_q = NULL;
+ }
+}
+
static int qeth_alloc_cq(struct qeth_card *card)
{
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_CARD_TEXT(card, 2, "cqon");
- card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
- dev_err(&card->gdev->dev, "Failed to create completion queue\n");
- return -ENOMEM;
+ card->qdio.c_q = qeth_alloc_qdio_queue();
+ if (!card->qdio.c_q) {
+ dev_err(&card->gdev->dev,
+ "Failed to create completion queue\n");
+ return -ENOMEM;
+ }
}
} else {
QETH_CARD_TEXT(card, 2, "nocq");
- card->qdio.c_q = NULL;
+ qeth_free_cq(card);
}
return 0;
}
-static void qeth_free_cq(struct qeth_card *card)
-{
- if (card->qdio.c_q) {
- qeth_free_qdio_queue(card->qdio.c_q);
- card->qdio.c_q = NULL;
- }
-}
-
static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
int delayed)
{
@@ -2628,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "allcqdbf");
+ /* completion */
+ if (qeth_alloc_cq(card))
+ goto out_err;
+
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
@@ -2663,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
}
- /* completion */
- if (qeth_alloc_cq(card))
- goto out_freeoutq;
-
return 0;
out_freeoutq:
@@ -2677,6 +2680,8 @@ out_freeoutq:
qeth_free_buffer_pool(card);
out_buffer_pool:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+ qeth_free_cq(card);
+out_err:
return -ENOMEM;
}
@@ -2684,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
+ qeth_free_cq(card);
+
if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
QETH_QDIO_UNINITIALIZED)
return;
- qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (card->qdio.in_q->bufs[j].rx_skb) {
consume_skb(card->qdio.in_q->bufs[j].rx_skb);
@@ -3742,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
- int rc;
-
- if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
- rc = -1;
- goto out;
- } else {
- if (card->options.cq == cq) {
- rc = 0;
- goto out;
- }
-
- qeth_free_qdio_queues(card);
- card->options.cq = cq;
- rc = 0;
- }
-out:
- return rc;
+ if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+ return -1;
+ card->options.cq = cq;
+ return 0;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 58fdf679341d..65cdc8b77e35 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3120,6 +3120,7 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
{
struct scsi_device *sdp = sdkp->device;
const struct scsi_io_group_descriptor *desc, *start, *end;
+ u16 permanent_stream_count_old;
struct scsi_sense_hdr sshdr;
struct scsi_mode_data data;
int res;
@@ -3140,12 +3141,13 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
for (desc = start; desc < end; desc++)
if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
break;
+ permanent_stream_count_old = sdkp->permanent_stream_count;
sdkp->permanent_stream_count = desc - start;
if (sdkp->rscs && sdkp->permanent_stream_count < 2)
sd_printk(KERN_INFO, sdkp,
"Unexpected: RSCS has been set and the permanent stream count is %u\n",
sdkp->permanent_stream_count);
- else if (sdkp->permanent_stream_count)
+ else if (sdkp->permanent_stream_count != permanent_stream_count_old)
sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
sdkp->permanent_stream_count);
}
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 7cc219d78551..e358ac5b4509 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -623,7 +623,7 @@ static int spi_engine_probe(struct platform_device *pdev)
version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
- dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+ dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
ADI_AXI_PCORE_VER_MAJOR(version),
ADI_AXI_PCORE_VER_MINOR(version),
ADI_AXI_PCORE_VER_PATCH(version));
diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c
index 35ef5e8e2ffd..77e9738e42f6 100644
--- a/drivers/spi/spi-hisi-kunpeng.c
+++ b/drivers/spi/spi-hisi-kunpeng.c
@@ -151,8 +151,6 @@ static const struct debugfs_reg32 hisi_spi_regs[] = {
HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
- HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
- HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ff75838c1b5d..a2c467d9e92f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -4523,6 +4523,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
wait_for_completion(&done);
status = message->status;
}
+ message->complete = NULL;
message->context = NULL;
return status;
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index d78d54ae2605..5693cc8b231a 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -139,11 +139,13 @@ struct tz_episode {
* we keep track of the current position in the history array.
*
* @tz_episodes: a list of thermal mitigation episodes
+ * @tz: thermal zone this object belongs to
* @trips_crossed: an array of trip points crossed by id
* @nr_trips: the number of trip points currently being crossed
*/
struct tz_debugfs {
struct list_head tz_episodes;
+ struct thermal_zone_device *tz;
int *trips_crossed;
int nr_trips;
};
@@ -503,15 +505,23 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
*/
void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev)
{
- struct thermal_debugfs *thermal_dbg = cdev->debugfs;
+ struct thermal_debugfs *thermal_dbg;
- if (!thermal_dbg)
+ mutex_lock(&cdev->lock);
+
+ thermal_dbg = cdev->debugfs;
+ if (!thermal_dbg) {
+ mutex_unlock(&cdev->lock);
return;
+ }
+
+ cdev->debugfs = NULL;
+
+ mutex_unlock(&cdev->lock);
mutex_lock(&thermal_dbg->lock);
thermal_debugfs_cdev_clear(&thermal_dbg->cdev_dbg);
- cdev->debugfs = NULL;
mutex_unlock(&thermal_dbg->lock);
@@ -716,8 +726,7 @@ out:
static void *tze_seq_start(struct seq_file *s, loff_t *pos)
{
- struct thermal_zone_device *tz = s->private;
- struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct thermal_debugfs *thermal_dbg = s->private;
struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
mutex_lock(&thermal_dbg->lock);
@@ -727,8 +736,7 @@ static void *tze_seq_start(struct seq_file *s, loff_t *pos)
static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct thermal_zone_device *tz = s->private;
- struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct thermal_debugfs *thermal_dbg = s->private;
struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
return seq_list_next(v, &tz_dbg->tz_episodes, pos);
@@ -736,15 +744,15 @@ static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void tze_seq_stop(struct seq_file *s, void *v)
{
- struct thermal_zone_device *tz = s->private;
- struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct thermal_debugfs *thermal_dbg = s->private;
mutex_unlock(&thermal_dbg->lock);
}
static int tze_seq_show(struct seq_file *s, void *v)
{
- struct thermal_zone_device *tz = s->private;
+ struct thermal_debugfs *thermal_dbg = s->private;
+ struct thermal_zone_device *tz = thermal_dbg->tz_dbg.tz;
struct thermal_trip *trip;
struct tz_episode *tze;
const char *type;
@@ -810,6 +818,8 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
tz_dbg = &thermal_dbg->tz_dbg;
+ tz_dbg->tz = tz;
+
tz_dbg->trips_crossed = kzalloc(sizeof(int) * tz->num_trips, GFP_KERNEL);
if (!tz_dbg->trips_crossed) {
thermal_debugfs_remove_id(thermal_dbg);
@@ -818,23 +828,44 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
INIT_LIST_HEAD(&tz_dbg->tz_episodes);
- debugfs_create_file("mitigations", 0400, thermal_dbg->d_top, tz, &tze_fops);
+ debugfs_create_file("mitigations", 0400, thermal_dbg->d_top,
+ thermal_dbg, &tze_fops);
tz->debugfs = thermal_dbg;
}
void thermal_debug_tz_remove(struct thermal_zone_device *tz)
{
- struct thermal_debugfs *thermal_dbg = tz->debugfs;
+ struct thermal_debugfs *thermal_dbg;
+ struct tz_episode *tze, *tmp;
+ struct tz_debugfs *tz_dbg;
+ int *trips_crossed;
- if (!thermal_dbg)
+ mutex_lock(&tz->lock);
+
+ thermal_dbg = tz->debugfs;
+ if (!thermal_dbg) {
+ mutex_unlock(&tz->lock);
return;
+ }
+
+ tz->debugfs = NULL;
+
+ mutex_unlock(&tz->lock);
+
+ tz_dbg = &thermal_dbg->tz_dbg;
mutex_lock(&thermal_dbg->lock);
- tz->debugfs = NULL;
+ trips_crossed = tz_dbg->trips_crossed;
+
+ list_for_each_entry_safe(tze, tmp, &tz_dbg->tz_episodes, node) {
+ list_del(&tze->node);
+ kfree(tze);
+ }
mutex_unlock(&thermal_dbg->lock);
thermal_debugfs_remove_id(thermal_dbg);
+ kfree(trips_crossed);
}
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index c60794264da2..45cb8149d374 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -57,13 +57,14 @@ static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_n
bp->v.seq = cpu_to_le64(f->cookie);
bp->v.sectors_written = 0;
bp->v.flags = 0;
+ bp->v.sectors_written = cpu_to_le16(f->sectors_written);
bp->v.min_key = f->min_key;
SET_BTREE_PTR_RANGE_UPDATED(&bp->v, f->range_updated);
memcpy(bp->v.start, f->ptrs, sizeof(struct bch_extent_ptr) * f->nr_ptrs);
}
static bool found_btree_node_is_readable(struct btree_trans *trans,
- const struct found_btree_node *f)
+ struct found_btree_node *f)
{
struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } k;
@@ -71,8 +72,10 @@ static bool found_btree_node_is_readable(struct btree_trans *trans,
struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false);
bool ret = !IS_ERR_OR_NULL(b);
- if (ret)
+ if (ret) {
+ f->sectors_written = b->written;
six_unlock_read(&b->c.lock);
+ }
/*
* We might update this node's range; if that happens, we need the node
diff --git a/fs/bcachefs/btree_node_scan_types.h b/fs/bcachefs/btree_node_scan_types.h
index abb7b27d556a..5cfaeb5ac831 100644
--- a/fs/bcachefs/btree_node_scan_types.h
+++ b/fs/bcachefs/btree_node_scan_types.h
@@ -9,6 +9,7 @@ struct found_btree_node {
bool overwritten:1;
u8 btree_id;
u8 level;
+ unsigned sectors_written;
u32 seq;
u64 cookie;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 941401a210f5..82f179258867 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -525,7 +525,6 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
bch2_data_type_str(data_type))) {
- BUG();
ret = -EIO;
goto err;
}
@@ -629,7 +628,6 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
bch2_data_type_str(ptr_data_type),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- BUG();
ret = -EIO;
goto err;
}
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index ca4a066e9a54..0f95d7fb5ec0 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -606,7 +606,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
struct bkey_s new,
unsigned flags)
{
- s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
+ s64 nr = (s64) bkey_is_inode(new.k) - (s64) bkey_is_inode(old.k);
if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (nr) {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 55f3ba6a831c..0493272a7668 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3758,15 +3758,43 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
goto drop_write;
}
- down_write(&fs_info->subvol_sem);
-
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA:
+ down_write(&fs_info->subvol_sem);
ret = btrfs_quota_enable(fs_info, sa);
+ up_write(&fs_info->subvol_sem);
break;
case BTRFS_QUOTA_CTL_DISABLE:
+ /*
+ * Lock the cleaner mutex to prevent races with concurrent
+ * relocation, because relocation may be building backrefs for
+ * blocks of the quota root while we are deleting the root. This
+ * is like dropping fs roots of deleted snapshots/subvolumes, we
+ * need the same protection.
+ *
+ * This also prevents races between concurrent tasks trying to
+ * disable quotas, because we will unlock and relock
+ * qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+ *
+ * We take this here because we have the dependency of
+ *
+ * inode_lock -> subvol_sem
+ *
+ * because of rename. With relocation we can prealloc extents,
+ * so that makes the dependency chain
+ *
+ * cleaner_mutex -> inode_lock -> subvol_sem
+ *
+ * so we must take the cleaner_mutex here before we take the
+ * subvol_sem. The deadlock can't actually happen, but this
+ * quiets lockdep.
+ */
+ mutex_lock(&fs_info->cleaner_mutex);
+ down_write(&fs_info->subvol_sem);
ret = btrfs_quota_disable(fs_info);
+ up_write(&fs_info->subvol_sem);
+ mutex_unlock(&fs_info->cleaner_mutex);
break;
default:
ret = -EINVAL;
@@ -3774,7 +3802,6 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
}
kfree(sa);
- up_write(&fs_info->subvol_sem);
drop_write:
mnt_drop_write_file(file);
return ret;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b749ba45da2b..c2a42bcde98e 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1188,6 +1188,7 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
ordered->disk_bytenr += len;
ordered->num_bytes -= len;
ordered->disk_num_bytes -= len;
+ ordered->ram_bytes -= len;
if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
ASSERT(ordered->bytes_left == 0);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index cf8820ce7aa2..364acc9bbe73 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1342,16 +1342,10 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
lockdep_assert_held_write(&fs_info->subvol_sem);
/*
- * Lock the cleaner mutex to prevent races with concurrent relocation,
- * because relocation may be building backrefs for blocks of the quota
- * root while we are deleting the root. This is like dropping fs roots
- * of deleted snapshots/subvolumes, we need the same protection.
- *
- * This also prevents races between concurrent tasks trying to disable
- * quotas, because we will unlock and relock qgroup_ioctl_lock across
- * BTRFS_FS_QUOTA_ENABLED changes.
+ * Relocation will mess with backrefs, so make sure we have the
+ * cleaner_mutex held to protect us from relocate.
*/
- mutex_lock(&fs_info->cleaner_mutex);
+ lockdep_assert_held(&fs_info->cleaner_mutex);
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
@@ -1373,9 +1367,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
+ /*
+ * We have nothing held here and no trans handle, just return the error
+ * if there is one.
+ */
ret = flush_reservations(fs_info);
if (ret)
- goto out_unlock_cleaner;
+ return ret;
/*
* 1 For the root item
@@ -1439,9 +1437,6 @@ out:
btrfs_end_transaction(trans);
else if (trans)
ret = btrfs_commit_transaction(trans);
-out_unlock_cleaner:
- mutex_unlock(&fs_info->cleaner_mutex);
-
return ret;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f15591f3e54f..ef6bd2f4251b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3455,6 +3455,7 @@ again:
* alignment and size).
*/
ret = -EUCLEAN;
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
goto error;
}
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 8aff1a724805..62da538d91cb 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -151,7 +151,7 @@ static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
if (WARN_ON(len == 0))
source = NETFS_INVALID_READ;
if (source != NETFS_READ_FROM_CACHE) {
- erofs_err(NULL, "prepare_read failed (source %d)", source);
+ erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source);
return -EIO;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 39c67119f43b..d28ccfc0352b 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -84,13 +84,6 @@ struct erofs_dev_context {
bool flatdev;
};
-struct erofs_fs_context {
- struct erofs_mount_opts opt;
- struct erofs_dev_context *devs;
- char *fsid;
- char *domain_id;
-};
-
/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
/* # of pages needed for EROFS lz4 rolling decompression */
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index c0eb139adb07..30b49b2eee53 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -370,18 +370,18 @@ out:
return ret;
}
-static void erofs_default_options(struct erofs_fs_context *ctx)
+static void erofs_default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_ZIP
- ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
- ctx->opt.max_sync_decompress_pages = 3;
- ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
+ sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ sbi->opt.max_sync_decompress_pages = 3;
+ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
- set_opt(&ctx->opt, XATTR_USER);
+ set_opt(&sbi->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- set_opt(&ctx->opt, POSIX_ACL);
+ set_opt(&sbi->opt, POSIX_ACL);
#endif
}
@@ -426,16 +426,16 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
switch (mode) {
case EROFS_MOUNT_DAX_ALWAYS:
- set_opt(&ctx->opt, DAX_ALWAYS);
- clear_opt(&ctx->opt, DAX_NEVER);
+ set_opt(&sbi->opt, DAX_ALWAYS);
+ clear_opt(&sbi->opt, DAX_NEVER);
return true;
case EROFS_MOUNT_DAX_NEVER:
- set_opt(&ctx->opt, DAX_NEVER);
- clear_opt(&ctx->opt, DAX_ALWAYS);
+ set_opt(&sbi->opt, DAX_NEVER);
+ clear_opt(&sbi->opt, DAX_ALWAYS);
return true;
default:
DBG_BUGON(1);
@@ -450,7 +450,7 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
static int erofs_fc_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
struct fs_parse_result result;
struct erofs_device_info *dif;
int opt, ret;
@@ -463,9 +463,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
if (result.boolean)
- set_opt(&ctx->opt, XATTR_USER);
+ set_opt(&sbi->opt, XATTR_USER);
else
- clear_opt(&ctx->opt, XATTR_USER);
+ clear_opt(&sbi->opt, XATTR_USER);
#else
errorfc(fc, "{,no}user_xattr options not supported");
#endif
@@ -473,16 +473,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
if (result.boolean)
- set_opt(&ctx->opt, POSIX_ACL);
+ set_opt(&sbi->opt, POSIX_ACL);
else
- clear_opt(&ctx->opt, POSIX_ACL);
+ clear_opt(&sbi->opt, POSIX_ACL);
#else
errorfc(fc, "{,no}acl options not supported");
#endif
break;
case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
- ctx->opt.cache_strategy = result.uint_32;
+ sbi->opt.cache_strategy = result.uint_32;
#else
errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
@@ -504,27 +504,27 @@ static int erofs_fc_parse_param(struct fs_context *fc,
kfree(dif);
return -ENOMEM;
}
- down_write(&ctx->devs->rwsem);
- ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
- up_write(&ctx->devs->rwsem);
+ down_write(&sbi->devs->rwsem);
+ ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
+ up_write(&sbi->devs->rwsem);
if (ret < 0) {
kfree(dif->path);
kfree(dif);
return ret;
}
- ++ctx->devs->extra_devices;
+ ++sbi->devs->extra_devices;
break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
case Opt_fsid:
- kfree(ctx->fsid);
- ctx->fsid = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->fsid)
+ kfree(sbi->fsid);
+ sbi->fsid = kstrdup(param->string, GFP_KERNEL);
+ if (!sbi->fsid)
return -ENOMEM;
break;
case Opt_domain_id:
- kfree(ctx->domain_id);
- ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->domain_id)
+ kfree(sbi->domain_id);
+ sbi->domain_id = kstrdup(param->string, GFP_KERNEL);
+ if (!sbi->domain_id)
return -ENOMEM;
break;
#else
@@ -581,8 +581,7 @@ static const struct export_operations erofs_export_ops = {
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
- struct erofs_sb_info *sbi;
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
int err;
sb->s_magic = EROFS_SUPER_MAGIC;
@@ -590,19 +589,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_op = &erofs_sops;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
-
- sb->s_fs_info = sbi;
- sbi->opt = ctx->opt;
- sbi->devs = ctx->devs;
- ctx->devs = NULL;
- sbi->fsid = ctx->fsid;
- ctx->fsid = NULL;
- sbi->domain_id = ctx->domain_id;
- ctx->domain_id = NULL;
-
sbi->blkszbits = PAGE_SHIFT;
if (erofs_is_fscache_mode(sb)) {
sb->s_blocksize = PAGE_SIZE;
@@ -706,9 +692,9 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
static int erofs_fc_get_tree(struct fs_context *fc)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
- if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
return get_tree_nodev(fc, erofs_fc_fill_super);
return get_tree_bdev(fc, erofs_fc_fill_super);
@@ -718,19 +704,19 @@ static int erofs_fc_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *new_sbi = fc->s_fs_info;
DBG_BUGON(!sb_rdonly(sb));
- if (ctx->fsid || ctx->domain_id)
+ if (new_sbi->fsid || new_sbi->domain_id)
erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
- if (test_opt(&ctx->opt, POSIX_ACL))
+ if (test_opt(&new_sbi->opt, POSIX_ACL))
fc->sb_flags |= SB_POSIXACL;
else
fc->sb_flags &= ~SB_POSIXACL;
- sbi->opt = ctx->opt;
+ sbi->opt = new_sbi->opt;
fc->sb_flags |= SB_RDONLY;
return 0;
@@ -761,12 +747,15 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
static void erofs_fc_free(struct fs_context *fc)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
- erofs_free_dev_context(ctx->devs);
- kfree(ctx->fsid);
- kfree(ctx->domain_id);
- kfree(ctx);
+ if (!sbi)
+ return;
+
+ erofs_free_dev_context(sbi->devs);
+ kfree(sbi->fsid);
+ kfree(sbi->domain_id);
+ kfree(sbi);
}
static const struct fs_context_operations erofs_context_ops = {
@@ -778,38 +767,35 @@ static const struct fs_context_operations erofs_context_ops = {
static int erofs_init_fs_context(struct fs_context *fc)
{
- struct erofs_fs_context *ctx;
+ struct erofs_sb_info *sbi;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
return -ENOMEM;
- ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
- if (!ctx->devs) {
- kfree(ctx);
+
+ sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
+ if (!sbi->devs) {
+ kfree(sbi);
return -ENOMEM;
}
- fc->fs_private = ctx;
+ fc->s_fs_info = sbi;
- idr_init(&ctx->devs->tree);
- init_rwsem(&ctx->devs->rwsem);
- erofs_default_options(ctx);
+ idr_init(&sbi->devs->tree);
+ init_rwsem(&sbi->devs->rwsem);
+ erofs_default_options(sbi);
fc->ops = &erofs_context_ops;
return 0;
}
static void erofs_kill_sb(struct super_block *sb)
{
- struct erofs_sb_info *sbi;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
- if (erofs_is_fscache_mode(sb))
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
kill_anon_super(sb);
else
kill_block_super(sb);
- sbi = EROFS_SB(sb);
- if (!sbi)
- return;
-
erofs_free_dev_context(sbi->devs);
fs_put_dax(sbi->dax_dev, NULL);
erofs_fscache_unregister_fs(sb);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c709c296ea9a..acef52ecb1bb 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2429,7 +2429,12 @@ static int nfs_net_init(struct net *net)
struct nfs_net *nn = net_generic(net, nfs_net_id);
nfs_clients_init(net);
- rpc_proc_register(net, &nn->rpcstats);
+
+ if (!rpc_proc_register(net, &nn->rpcstats)) {
+ nfs_clients_exit(net);
+ return -ENOMEM;
+ }
+
return nfs_fs_proc_net_init(net);
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 1955481832e0..a644460f3a5e 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3515,6 +3515,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
args.exp = exp;
args.dentry = dentry;
args.ignore_crossmnt = (ignore_crossmnt != 0);
+ args.acl = NULL;
/*
* Make a local copy of the attribute bitmap that can be modified.
@@ -3573,7 +3574,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
} else
args.fhp = fhp;
- args.acl = NULL;
if (attrmask[0] & FATTR4_WORD0_ACL) {
err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
if (err == -EOPNOTSUPP)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c99bc3df2d28..219ee7a76874 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -963,6 +963,7 @@ bool bpf_jit_supports_far_kfunc_call(void);
bool bpf_jit_supports_exceptions(void);
bool bpf_jit_supports_ptr_xchg(void);
bool bpf_jit_supports_arena(void);
+u64 bpf_arch_uaddress_limit(void);
void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 4660582a3302..ed180ca419da 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -320,13 +320,13 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
static inline int devm_regulator_get_enable(struct device *dev, const char *id)
{
- return -ENODEV;
+ return 0;
}
static inline int devm_regulator_get_enable_optional(struct device *dev,
const char *id)
{
- return -ENODEV;
+ return 0;
}
static inline struct regulator *__must_check
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index e65ec3fd2799..a509caf823d6 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -461,10 +461,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
{
+ read_lock_bh(&sk->sk_callback_lock);
if (psock->saved_data_ready)
psock->saved_data_ready(sk);
else
sk->sk_data_ready(sk);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void psock_set_prog(struct bpf_prog **pprog,
diff --git a/include/net/gro.h b/include/net/gro.h
index 50f1e403dbbb..c1d4ca0463a1 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -87,6 +87,15 @@ struct napi_gro_cb {
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
+
+ /* L3 offsets */
+ union {
+ struct {
+ u16 network_offset;
+ u16 inner_network_offset;
+ };
+ u16 network_offsets[2];
+ };
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
diff --git a/kernel/bounds.c b/kernel/bounds.c
index c5a9fcd2d622..29b2cd00df2c 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -19,7 +19,7 @@ int main(void)
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
#ifdef CONFIG_SMP
- DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
+ DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
#ifdef CONFIG_LRU_GEN
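
The bounds.c change matters when CONFIG_NR_CPUS is a power of two: bits_per() counts the bits needed to store the value itself, while order_base_2() is the ceiling log2, which is what indexing CPU IDs 0..NR_CPUS-1 requires. A small userspace sketch; the two helpers are hand-rolled approximations of the kernel macros, for illustration only:

#include <stdio.h>

static int fls_uint(unsigned int x)
{
	int bits = 0;

	while (x) {
		bits++;
		x >>= 1;
	}
	return bits;
}

/* ~ number of bits needed to store the value n */
static int bits_per(unsigned int n)
{
	return n ? fls_uint(n) : 1;
}

/* ~ ceil(log2(n)) */
static int order_base_2(unsigned int n)
{
	return n > 1 ? fls_uint(n - 1) : 0;
}

int main(void)
{
	unsigned int nr_cpus = 64;	/* example power-of-two CONFIG_NR_CPUS */

	/* CPU IDs 0..63 fit in 6 bits; bits_per() would report 7. */
	printf("bits_per(%u) = %d\n", nr_cpus, bits_per(nr_cpus));
	printf("order_base_2(%u) = %d\n", nr_cpus, order_base_2(nr_cpus));
	return 0;
}
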
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 696bc55de8e8..1ea5ce5bb599 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2942,6 +2942,15 @@ bool __weak bpf_jit_supports_arena(void)
return false;
}
+u64 __weak bpf_arch_uaddress_limit(void)
+{
+#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
+ return TASK_SIZE;
+#else
+ return 0;
+#endif
+}
+
/* Return TRUE if the JIT backend satisfies the following two conditions:
* 1) JIT backend supports atomic_xchg() on pointer-sized words.
* 2) Under the specific arch, the implementation of xchg() is the same
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 98188379d5c7..cb7ad1f795e1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18289,8 +18289,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
f = fdget(fd);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
- verbose(env, "fd %d is not pointing to valid bpf_map\n",
- insn[0].imm);
+ verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
return PTR_ERR(map);
}
@@ -19676,6 +19675,36 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
goto next_insn;
}
+ /* Make it impossible to de-reference a userspace address */
+ if (BPF_CLASS(insn->code) == BPF_LDX &&
+ (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+ BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
+ struct bpf_insn *patch = &insn_buf[0];
+ u64 uaddress_limit = bpf_arch_uaddress_limit();
+
+ if (!uaddress_limit)
+ goto next_insn;
+
+ *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+ if (insn->off)
+ *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off);
+ *patch++ = BPF_ALU64_IMM(BPF_RSH, BPF_REG_AX, 32);
+ *patch++ = BPF_JMP_IMM(BPF_JLE, BPF_REG_AX, uaddress_limit >> 32, 2);
+ *patch++ = *insn;
+ *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+ *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0);
+
+ cnt = patch - insn_buf;
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ goto next_insn;
+ }
+
/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
if (BPF_CLASS(insn->code) == BPF_LD &&
(BPF_MODE(insn->code) == BPF_ABS ||
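
Read together with the weak bpf_arch_uaddress_limit() default added above, the patched sequence rejects likely user addresses before a BPF_PROBE_MEM/BPF_PROBE_MEMSX load by comparing only the upper 32 bits of the address against the arch's user address limit. A hedged C rendering of what the emitted instructions compute; this is plain userspace code, not the actual insn encoding, and TASK_SIZE_EXAMPLE merely stands in for bpf_arch_uaddress_limit():

#include <stdint.h>
#include <stdio.h>

#define TASK_SIZE_EXAMPLE 0x00007ffffffff000ULL

/* dst = *(u64 *)(src + off), but only when the address looks like a kernel
 * address; otherwise the load is skipped and dst is forced to 0, mirroring
 * the BPF_MOV64_IMM(insn->dst_reg, 0) in the patch. */
static uint64_t probe_mem_load(const void *src, int16_t off)
{
	uint64_t addr = (uint64_t)(uintptr_t)src + off;	/* BPF_REG_AX = src + off */

	if ((addr >> 32) <= (TASK_SIZE_EXAMPLE >> 32))
		return 0;				/* likely user pointer: skip */

	return *(const uint64_t *)(uintptr_t)addr;	/* kernel pointer: real load */
}

int main(void)
{
	/* A low (user-range) address is never dereferenced; the guard
	 * returns 0 instead. */
	printf("%llu\n", (unsigned long long)probe_mem_load((void *)0x1000, 0));
	return 0;
}
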
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0066c8f6c154..d2dbe099286b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1277,8 +1277,12 @@ static bool kick_pool(struct worker_pool *pool)
!cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
struct work_struct *work = list_first_entry(&pool->worklist,
struct work_struct, entry);
- p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
- get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
+ int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
+ cpu_online_mask);
+ if (wake_cpu < nr_cpu_ids) {
+ p->wake_cpu = wake_cpu;
+ get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
+ }
}
#endif
wake_up_process(p);
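
The kick_pool() fix only repatriates the worker when the pod cpumask still intersects cpu_online_mask; cpumask_any_and_distribute() returns >= nr_cpu_ids when the intersection is empty, and p->wake_cpu is then left untouched. A rough userspace analogue using plain bitmasks; the helper below is a toy stand-in (it picks the first set bit rather than distributing round-robin):

#include <stdio.h>

#define NR_CPU_IDS 8

/* Toy stand-in for cpumask_any_and_distribute(): first set bit of
 * (pod & online), or NR_CPU_IDS when the intersection is empty. */
static int pick_wake_cpu(unsigned int pod_mask, unsigned int online_mask)
{
	unsigned int both = pod_mask & online_mask;
	int cpu;

	for (cpu = 0; cpu < NR_CPU_IDS; cpu++)
		if (both & (1u << cpu))
			return cpu;
	return NR_CPU_IDS;
}

int main(void)
{
	int wake_cpu = pick_wake_cpu(0x0c /* pod: CPUs 2-3 */,
				     0x03 /* online: CPUs 0-1 */);

	if (wake_cpu < NR_CPU_IDS)
		printf("repatriate to CPU %d\n", wake_cpu);
	else
		printf("pod fully offline, keep previous wake_cpu\n");
	return 0;
}
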
@@ -1594,6 +1598,15 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
if (off_cpu >= 0)
total_cpus--;
+ /* If all CPUs of the wq get offline, use the default values */
+ if (unlikely(!total_cpus)) {
+ for_each_node(node)
+ wq_node_nr_active(wq, node)->max = min_active;
+
+ wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
+ return;
+ }
+
for_each_node(node) {
int node_cpus;
@@ -1606,7 +1619,7 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
min_active, max_active);
}
- wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
+ wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
}
/**
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c63a5fbf1f1c..291185f54ee4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -375,7 +375,7 @@ config DEBUG_INFO_SPLIT
Incompatible with older versions of ccache.
config DEBUG_INFO_BTF
- bool "Generate BTF typeinfo"
+ bool "Generate BTF type information"
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
@@ -408,7 +408,8 @@ config PAHOLE_HAS_LANG_EXCLUDE
using DEBUG_INFO_BTF_MODULES.
config DEBUG_INFO_BTF_MODULES
- def_bool y
+ bool "Generate BTF type information for kernel modules"
+ default y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
help
Generate compact split BTF type information for kernel modules.
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 68b45c82c37a..7bc2220fea80 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1124,7 +1124,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,
do {
res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
extraction_flags, &off);
- if (res < 0)
+ if (res <= 0)
goto failed;
len = res;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f00158234505..9404dd551dfd 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -478,6 +478,8 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
if (unlikely(!vhdr))
goto out;
+ NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
+
type = vhdr->h_vlan_encapsulated_proto;
ptype = gro_find_receive_by_type(type);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7431f89e897b..d7c35f55bd69 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -266,7 +266,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
if (skb->dev == p->dev && ether_addr_equal(src, addr))
return;
- skb = skb_copy(skb, GFP_ATOMIC);
+ skb = pskb_copy(skb, GFP_ATOMIC);
if (!skb) {
DEV_STATS_INC(dev, tx_dropped);
return;
diff --git a/net/core/filter.c b/net/core/filter.c
index 8adf95765cdd..ae5254f712c9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4360,10 +4360,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
+ u32 flags = ri->flags;
struct bpf_map *map;
int err;
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->flags = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
if (unlikely(!xdpf)) {
@@ -4375,11 +4377,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
- map = READ_ONCE(ri->map);
- if (unlikely(map)) {
+ if (unlikely(flags & BPF_F_BROADCAST)) {
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+ * down by bpf_clear_redirect_map()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+ break;
+ }
+
WRITE_ONCE(ri->map, NULL);
err = dev_map_enqueue_multi(xdpf, dev, map,
- ri->flags & BPF_F_EXCLUDE_INGRESS);
+ flags & BPF_F_EXCLUDE_INGRESS);
} else {
err = dev_map_enqueue(fwd, xdpf, dev);
}
@@ -4442,9 +4453,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog,
- void *fwd,
- enum bpf_map_type map_type, u32 map_id)
+ struct bpf_prog *xdp_prog, void *fwd,
+ enum bpf_map_type map_type, u32 map_id,
+ u32 flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map;
@@ -4454,11 +4465,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
- map = READ_ONCE(ri->map);
- if (unlikely(map)) {
+ if (unlikely(flags & BPF_F_BROADCAST)) {
+ map = READ_ONCE(ri->map);
+
+ /* The map pointer is cleared when the map is being torn
+ * down by bpf_clear_redirect_map()
+ */
+ if (unlikely(!map)) {
+ err = -ENOENT;
+ break;
+ }
+
WRITE_ONCE(ri->map, NULL);
err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
- ri->flags & BPF_F_EXCLUDE_INGRESS);
+ flags & BPF_F_EXCLUDE_INGRESS);
} else {
err = dev_map_generic_redirect(fwd, skb, xdp_prog);
}
@@ -4495,9 +4515,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
+ u32 flags = ri->flags;
int err;
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->flags = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
@@ -4517,7 +4539,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
return 0;
}
- return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
+ return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
err:
_trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
return err;
diff --git a/net/core/gro.c b/net/core/gro.c
index 83f35d99a682..c7901253a1a8 100644
--- a/net/core/gro.c
+++ b/net/core/gro.c
@@ -371,6 +371,7 @@ static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
const skb_frag_t *frag0;
unsigned int headlen;
+ NAPI_GRO_CB(skb)->network_offset = 0;
NAPI_GRO_CB(skb)->data_offset = 0;
headlen = skb_headlen(skb);
NAPI_GRO_CB(skb)->frag0 = skb->data;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b99127712e67..4096e679f61c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2123,11 +2123,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
- int headerlen = skb_headroom(skb);
- unsigned int size = skb_end_offset(skb) + skb->data_len;
- struct sk_buff *n = __alloc_skb(size, gfp_mask,
- skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+ struct sk_buff *n;
+ unsigned int size;
+ int headerlen;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+ return NULL;
+ headerlen = skb_headroom(skb);
+ size = skb_end_offset(skb) + skb->data_len;
+ n = __alloc_skb(size, gfp_mask,
+ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
@@ -2455,12 +2461,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
/*
* Allocate the copy buffer
*/
- struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
- gfp_mask, skb_alloc_rx_flag(skb),
- NUMA_NO_NODE);
- int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
+ struct sk_buff *n;
+ int oldheadroom;
+
+ if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+ return NULL;
+ oldheadroom = skb_headroom(skb);
+ n = __alloc_skb(newheadroom + skb->len + newtailroom,
+ gfp_mask, skb_alloc_rx_flag(skb),
+ NUMA_NO_NODE);
if (!n)
return NULL;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 4d75ef9d24bf..fd20aae30be2 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1226,11 +1226,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
rcu_read_lock();
psock = sk_psock(sk);
- if (psock) {
- read_lock_bh(&sk->sk_callback_lock);
+ if (psock)
sk_psock_data_ready(sk, psock);
- read_unlock_bh(&sk->sk_callback_lock);
- }
rcu_read_unlock();
}
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 55bd72997b31..fafb123f798b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1572,6 +1572,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
/* The above will be needed by the transport layer if there is one
* immediately following this IP hdr.
*/
+ NAPI_GRO_CB(skb)->inner_network_offset = off;
/* Note : No need to call skb_gro_postpull_rcsum() here,
* as we already checked checksum over ipv4 header was 0
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1fe794967211..39229fd0601a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1473,7 +1473,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
* by icmp_hdr(skb)->type.
*/
if (sk->sk_type == SOCK_RAW &&
- !inet_test_bit(HDRINCL, sk))
+ !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
icmp_type = fl4->fl4_icmp_type;
else
icmp_type = icmp_hdr(skb)->type;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dcb11f22cbf2..4cb43401e0e0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -612,6 +612,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
(hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0, sk->sk_uid);
+ fl4.fl4_icmp_type = 0;
+ fl4.fl4_icmp_code = 0;
+
if (!hdrincl) {
rfv.msg = msg;
rfv.hlen = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 420905be5f30..b32cf2eeeb41 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -532,7 +532,8 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport)
{
- const struct iphdr *iph = ip_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
struct net *net = dev_net(skb->dev);
int iif, sdif;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 3498dd1d0694..8721fe5beca2 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -471,6 +471,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
struct sk_buff *p;
unsigned int ulen;
int ret = 0;
+ int flush;
/* requires non zero csum, for symmetry with GSO */
if (!uh->check) {
@@ -504,13 +505,22 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
return p;
}
+ flush = NAPI_GRO_CB(p)->flush;
+
+ if (NAPI_GRO_CB(p)->flush_id != 1 ||
+ NAPI_GRO_CB(p)->count != 1 ||
+ !NAPI_GRO_CB(p)->is_atomic)
+ flush |= NAPI_GRO_CB(p)->flush_id;
+ else
+ NAPI_GRO_CB(p)->is_atomic = false;
+
/* Terminate the flow on len mismatch or if it grow "too much".
* Under small packet flood GRO count could elsewhere grow a lot
* leading to excessive truesize values.
* On len mismatch merge the first packet shorter than gso_size,
* otherwise complete the GRO packet.
*/
- if (ulen > ntohs(uh2->len)) {
+ if (ulen > ntohs(uh2->len) || flush) {
pp = p;
} else {
if (NAPI_GRO_CB(skb)->is_flist) {
@@ -718,7 +728,8 @@ EXPORT_SYMBOL(udp_gro_complete);
INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
- const struct iphdr *iph = ip_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
/* do fraglist only if there is no outer UDP encap (or we already processed it) */
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index b41e35af69ea..c8b909a9904f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -237,6 +237,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
goto out;
skb_set_network_header(skb, off);
+ NAPI_GRO_CB(skb)->inner_network_offset = off;
flush += ntohs(iph->payload_len) != skb->len - hlen;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1a4cccdd40c9..8f7aa8bac1e7 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -272,7 +272,8 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
__be16 sport, __be16 dport)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
struct net *net = dev_net(skb->dev);
int iif, sdif;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index bbd347de00b4..b41152dd4246 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -164,7 +164,8 @@ flush:
INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
{
- const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+ const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + offset);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
/* do fraglist only if there is no outer UDP encap (or we already processed it) */
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 39e487ccc468..8ba00ad433c2 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -127,6 +127,9 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
/* checksums verified by L2TP */
skb->ip_summed = CHECKSUM_NONE;
+ /* drop outer flow-hash */
+ skb_clear_hash(skb);
+
skb_dst_drop(skb);
nf_reset_ct(skb);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7e74b812e366..965eb69dc5de 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3723,6 +3723,9 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
mptcp_subflow_early_fallback(msk, subflow);
}
+
+ WRITE_ONCE(msk->write_seq, subflow->idsn);
+ WRITE_ONCE(msk->snd_nxt, subflow->idsn);
if (likely(!__mptcp_check_fallback(msk)))
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
index f4a38bd6a7e0..bfb7758063f3 100644
--- a/net/nsh/nsh.c
+++ b/net/nsh/nsh.c
@@ -77,13 +77,15 @@ EXPORT_SYMBOL_GPL(nsh_pop);
static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
+ unsigned int outer_hlen, mac_len, nsh_len;
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
- unsigned int nsh_len, mac_len;
- __be16 proto;
+ __be16 outer_proto, proto;
skb_reset_network_header(skb);
+ outer_proto = skb->protocol;
+ outer_hlen = skb_mac_header_len(skb);
mac_len = skb->mac_len;
if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
@@ -113,10 +115,10 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
}
for (skb = segs; skb; skb = skb->next) {
- skb->protocol = htons(ETH_P_NSH);
- __skb_push(skb, nsh_len);
- skb->mac_header = mac_offset;
- skb->network_header = skb->mac_header + mac_len;
+ skb->protocol = outer_proto;
+ __skb_push(skb, nsh_len + outer_hlen);
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, outer_hlen);
skb->mac_len = mac_len;
}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 0af4642aeec4..1539d315afe7 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -119,18 +119,13 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
switch (srx->transport.family) {
case AF_INET:
if (peer->srx.transport.sin.sin_port !=
- srx->transport.sin.sin_port ||
- peer->srx.transport.sin.sin_addr.s_addr !=
- srx->transport.sin.sin_addr.s_addr)
+ srx->transport.sin.sin_port)
goto not_found;
break;
#ifdef CONFIG_AF_RXRPC_IPV6
case AF_INET6:
if (peer->srx.transport.sin6.sin6_port !=
- srx->transport.sin6.sin6_port ||
- memcmp(&peer->srx.transport.sin6.sin6_addr,
- &srx->transport.sin6.sin6_addr,
- sizeof(struct in6_addr)) != 0)
+ srx->transport.sin6.sin6_port)
goto not_found;
break;
#endif
diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
index f2701068ed9e..6716c021a532 100644
--- a/net/rxrpc/insecure.c
+++ b/net/rxrpc/insecure.c
@@ -19,7 +19,7 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
*/
static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
{
- return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 0, gfp);
+ return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 1, gfp);
}
static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index f1a68270862d..48a1475e6b06 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -155,7 +155,7 @@ static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t rem
switch (call->conn->security_level) {
default:
space = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
- return rxrpc_alloc_data_txbuf(call, space, 0, gfp);
+ return rxrpc_alloc_data_txbuf(call, space, 1, gfp);
case RXRPC_SECURITY_AUTH:
shdr = sizeof(struct rxkad_level1_hdr);
break;
diff --git a/net/rxrpc/txbuf.c b/net/rxrpc/txbuf.c
index e0679658d9de..c3913d8a50d3 100644
--- a/net/rxrpc/txbuf.c
+++ b/net/rxrpc/txbuf.c
@@ -21,20 +21,20 @@ struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_
{
struct rxrpc_wire_header *whdr;
struct rxrpc_txbuf *txb;
- size_t total, hoff = 0;
+ size_t total, hoff;
void *buf;
txb = kmalloc(sizeof(*txb), gfp);
if (!txb)
return NULL;
- if (data_align)
- hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
+ hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
total = hoff + sizeof(*whdr) + data_size;
+ data_align = umax(data_align, L1_CACHE_BYTES);
mutex_lock(&call->conn->tx_data_alloc_lock);
- buf = __page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
- ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
+ buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+ data_align);
mutex_unlock(&call->conn->tx_data_alloc_lock);
if (!buf) {
kfree(txb);
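
The txbuf change drops the data_align == 0 special case (the insecure and rxkad callers above now pass 1), raises the allocation alignment to at least L1_CACHE_BYTES, and keeps placing the wire header so that the payload immediately after it lands on a data_align boundary. The offset arithmetic, sketched in plain C with an assumed header size:

#include <stdio.h>
#include <stddef.h>

/* round_up() for power-of-two alignments, as in the kernel. */
#define round_up(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t whdr_size = 28;		/* assumed wire-header size, illustration only */
	size_t data_align = 16;		/* must be >= 1 now that callers pass 1, not 0 */

	/* Padding placed before the wire header so that the payload, which
	 * immediately follows the header, starts on a data_align boundary. */
	size_t hoff = round_up(whdr_size, data_align) - whdr_size;

	printf("hoff = %zu, payload starts at %zu\n", hoff, hoff + whdr_size);
	return 0;
}
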
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index bb9b747d58a1..ce18716491c8 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2664,6 +2664,7 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
.xprtsec = {
.policy = RPC_XPRTSEC_NONE,
},
+ .stats = upper_clnt->cl_stats,
};
unsigned int pflags = current->flags;
struct rpc_clnt *lower_clnt;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 5c9fd4791c4b..76284fc538eb 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -142,9 +142,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
- *buf = NULL;
if (skb_has_frag_list(frag) && __skb_linearize(frag))
goto err;
+ *buf = NULL;
frag = skb_unshare(frag, GFP_ATOMIC);
if (unlikely(!frag))
goto err;
@@ -156,6 +156,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (!head)
goto err;
+ /* Either the input skb ownership is transferred to headskb
+ * or the input skb is freed, clear the reference to avoid
+ * bad access on error path.
+ */
+ *buf = NULL;
if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
kfree_skb_partial(frag, headstolen);
} else {
@@ -179,7 +184,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
*headbuf = NULL;
return 1;
}
- *buf = NULL;
return 0;
err:
kfree_skb(*buf);
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 39ad96a18123..edcd26106557 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -205,6 +205,9 @@ __weak noinline struct file *bpf_testmod_return_ptr(int arg)
case 5: return (void *)~(1ull << 30); /* trigger extable */
case 6: return &f; /* valid addr */
case 7: return (void *)((long)&f | 1); /* kernel tricks */
+#ifdef CONFIG_X86_64
+ case 8: return (void *)VSYSCALL_ADDR; /* vsyscall page address */
+#endif
default: return NULL;
}
}
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
index eef816b80993..ca917c71ff60 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
@@ -84,6 +84,18 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
return v;
}
+static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type)
+{
+ struct vm_gic v;
+
+ v.gic_dev_type = gic_dev_type;
+ v.vm = vm_create_barebones();
+ v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
+
+ return v;
+}
+
+
static void vm_gic_destroy(struct vm_gic *v)
{
close(v->gic_fd);
@@ -357,6 +369,40 @@ static void test_vcpus_then_vgic(uint32_t gic_dev_type)
vm_gic_destroy(&v);
}
+#define KVM_VGIC_V2_ATTR(offset, cpu) \
+ (FIELD_PREP(KVM_DEV_ARM_VGIC_OFFSET_MASK, offset) | \
+ FIELD_PREP(KVM_DEV_ARM_VGIC_CPUID_MASK, cpu))
+
+#define GIC_CPU_CTRL 0x00
+
+static void test_v2_uaccess_cpuif_no_vcpus(void)
+{
+ struct vm_gic v;
+ u64 val = 0;
+ int ret;
+
+ v = vm_gic_create_barebones(KVM_DEV_TYPE_ARM_VGIC_V2);
+ subtest_dist_rdist(&v);
+
+ ret = __kvm_has_device_attr(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0));
+ TEST_ASSERT(ret && errno == EINVAL,
+ "accessed non-existent CPU interface, want errno: %i",
+ EINVAL);
+ ret = __kvm_device_attr_get(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "accessed non-existent CPU interface, want errno: %i",
+ EINVAL);
+ ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+ KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "accessed non-existent CPU interface, want errno: %i",
+ EINVAL);
+
+ vm_gic_destroy(&v);
+}
+
static void test_v3_new_redist_regions(void)
{
struct kvm_vcpu *vcpus[NR_VCPUS];
@@ -675,6 +721,9 @@ void run_tests(uint32_t gic_dev_type)
test_vcpus_then_vgic(gic_dev_type);
test_vgic_then_vcpus(gic_dev_type);
+ if (VGIC_DEV_IS_V2(gic_dev_type))
+ test_v2_uaccess_cpuif_no_vcpus();
+
if (VGIC_DEV_IS_V3(gic_dev_type)) {
test_v3_new_redist_regions();
test_v3_typer_accesses();