Diffstat (limited to 'fs')
-rw-r--r-- fs/9p/fid.h | 3
-rw-r--r-- fs/9p/v9fs.h | 11
-rw-r--r-- fs/9p/vfs_addr.c | 60
-rw-r--r-- fs/9p/vfs_file.c | 2
-rw-r--r-- fs/9p/vfs_inode.c | 60
-rw-r--r-- fs/9p/vfs_inode_dotl.c | 34
-rw-r--r-- fs/9p/vfs_super.c | 19
-rw-r--r-- fs/afs/file.c | 8
-rw-r--r-- fs/afs/internal.h | 6
-rw-r--r-- fs/afs/rotate.c | 8
-rw-r--r-- fs/afs/validation.c | 4
-rw-r--r-- fs/afs/write.c | 189
-rw-r--r-- fs/aio.c | 93
-rw-r--r-- fs/anon_inodes.c | 33
-rw-r--r-- fs/bcachefs/Makefile | 3
-rw-r--r-- fs/bcachefs/acl.c | 67
-rw-r--r-- fs/bcachefs/alloc_background.c | 386
-rw-r--r-- fs/bcachefs/alloc_background.h | 117
-rw-r--r-- fs/bcachefs/alloc_foreground.c | 308
-rw-r--r-- fs/bcachefs/alloc_foreground.h | 15
-rw-r--r-- fs/bcachefs/alloc_types.h | 10
-rw-r--r-- fs/bcachefs/backpointers.c | 330
-rw-r--r-- fs/bcachefs/backpointers.h | 92
-rw-r--r-- fs/bcachefs/bcachefs.h | 44
-rw-r--r-- fs/bcachefs/bcachefs_format.h | 47
-rw-r--r-- fs/bcachefs/bkey.c | 15
-rw-r--r-- fs/bcachefs/bkey.h | 39
-rw-r--r-- fs/bcachefs/bkey_methods.c | 28
-rw-r--r-- fs/bcachefs/bkey_methods.h | 73
-rw-r--r-- fs/bcachefs/bkey_sort.c | 79
-rw-r--r-- fs/bcachefs/bkey_sort.h | 4
-rw-r--r-- fs/bcachefs/bset.c | 43
-rw-r--r-- fs/bcachefs/bset.h | 8
-rw-r--r-- fs/bcachefs/btree_cache.c | 227
-rw-r--r-- fs/bcachefs/btree_cache.h | 5
-rw-r--r-- fs/bcachefs/btree_gc.c | 1439
-rw-r--r-- fs/bcachefs/btree_gc.h | 44
-rw-r--r-- fs/bcachefs/btree_io.c | 154
-rw-r--r-- fs/bcachefs/btree_io.h | 2
-rw-r--r-- fs/bcachefs/btree_iter.c | 399
-rw-r--r-- fs/bcachefs/btree_iter.h | 92
-rw-r--r-- fs/bcachefs/btree_journal_iter.c | 126
-rw-r--r-- fs/bcachefs/btree_journal_iter.h | 10
-rw-r--r-- fs/bcachefs/btree_key_cache.c | 144
-rw-r--r-- fs/bcachefs/btree_key_cache_types.h | 8
-rw-r--r-- fs/bcachefs/btree_locking.c | 207
-rw-r--r-- fs/bcachefs/btree_locking.h | 4
-rw-r--r-- fs/bcachefs/btree_node_scan.c | 524
-rw-r--r-- fs/bcachefs/btree_node_scan.h | 11
-rw-r--r-- fs/bcachefs/btree_node_scan_types.h | 31
-rw-r--r-- fs/bcachefs/btree_trans_commit.c | 109
-rw-r--r-- fs/bcachefs/btree_types.h | 143
-rw-r--r-- fs/bcachefs/btree_update.c | 101
-rw-r--r-- fs/bcachefs/btree_update.h | 14
-rw-r--r-- fs/bcachefs/btree_update_interior.c | 482
-rw-r--r-- fs/bcachefs/btree_update_interior.h | 36
-rw-r--r-- fs/bcachefs/btree_write_buffer.c | 36
-rw-r--r-- fs/bcachefs/buckets.c | 695
-rw-r--r-- fs/bcachefs/buckets.h | 79
-rw-r--r-- fs/bcachefs/chardev.c | 170
-rw-r--r-- fs/bcachefs/checksum.c | 41
-rw-r--r-- fs/bcachefs/checksum.h | 5
-rw-r--r-- fs/bcachefs/compress.h | 8
-rw-r--r-- fs/bcachefs/data_update.c | 83
-rw-r--r-- fs/bcachefs/debug.c | 155
-rw-r--r-- fs/bcachefs/dirent.c | 97
-rw-r--r-- fs/bcachefs/dirent.h | 8
-rw-r--r-- fs/bcachefs/disk_groups.c | 11
-rw-r--r-- fs/bcachefs/ec.c | 417
-rw-r--r-- fs/bcachefs/ec.h | 9
-rw-r--r-- fs/bcachefs/errcode.h | 4
-rw-r--r-- fs/bcachefs/error.c | 65
-rw-r--r-- fs/bcachefs/error.h | 6
-rw-r--r-- fs/bcachefs/extent_update.c | 2
-rw-r--r-- fs/bcachefs/extents.c | 207
-rw-r--r-- fs/bcachefs/extents.h | 37
-rw-r--r-- fs/bcachefs/eytzinger.c | 305
-rw-r--r-- fs/bcachefs/eytzinger.h | 81
-rw-r--r-- fs/bcachefs/fs-common.c | 38
-rw-r--r-- fs/bcachefs/fs-io-buffered.c | 14
-rw-r--r-- fs/bcachefs/fs-io-direct.c | 25
-rw-r--r-- fs/bcachefs/fs-io-pagecache.c | 2
-rw-r--r-- fs/bcachefs/fs-io.c | 25
-rw-r--r-- fs/bcachefs/fs-ioctl.c | 2
-rw-r--r-- fs/bcachefs/fs.c | 124
-rw-r--r-- fs/bcachefs/fsck.c | 472
-rw-r--r-- fs/bcachefs/inode.c | 66
-rw-r--r-- fs/bcachefs/inode.h | 23
-rw-r--r-- fs/bcachefs/io_misc.c | 12
-rw-r--r-- fs/bcachefs/io_read.c | 68
-rw-r--r-- fs/bcachefs/io_write.c | 125
-rw-r--r-- fs/bcachefs/io_write_types.h | 1
-rw-r--r-- fs/bcachefs/journal.c | 139
-rw-r--r-- fs/bcachefs/journal.h | 6
-rw-r--r-- fs/bcachefs/journal_io.c | 240
-rw-r--r-- fs/bcachefs/journal_io.h | 5
-rw-r--r-- fs/bcachefs/journal_reclaim.c | 10
-rw-r--r-- fs/bcachefs/journal_sb.c | 10
-rw-r--r-- fs/bcachefs/journal_seq_blacklist.c | 80
-rw-r--r-- fs/bcachefs/journal_seq_blacklist.h | 2
-rw-r--r-- fs/bcachefs/journal_types.h | 16
-rw-r--r-- fs/bcachefs/logged_ops.c | 9
-rw-r--r-- fs/bcachefs/lru.c | 4
-rw-r--r-- fs/bcachefs/lru.h | 2
-rw-r--r-- fs/bcachefs/mean_and_variance_test.c | 28
-rw-r--r-- fs/bcachefs/migrate.c | 8
-rw-r--r-- fs/bcachefs/move.c | 86
-rw-r--r-- fs/bcachefs/movinggc.c | 4
-rw-r--r-- fs/bcachefs/opts.c | 33
-rw-r--r-- fs/bcachefs/opts.h | 28
-rw-r--r-- fs/bcachefs/printbuf.c | 232
-rw-r--r-- fs/bcachefs/printbuf.h | 53
-rw-r--r-- fs/bcachefs/quota.c | 131
-rw-r--r-- fs/bcachefs/quota.h | 4
-rw-r--r-- fs/bcachefs/rebalance.c | 10
-rw-r--r-- fs/bcachefs/recovery.c | 554
-rw-r--r-- fs/bcachefs/recovery.h | 32
-rw-r--r-- fs/bcachefs/recovery_passes.c | 245
-rw-r--r-- fs/bcachefs/recovery_passes.h | 17
-rw-r--r-- fs/bcachefs/recovery_passes_types.h (renamed from fs/bcachefs/recovery_types.h) | 11
-rw-r--r-- fs/bcachefs/reflink.c | 75
-rw-r--r-- fs/bcachefs/reflink.h | 16
-rw-r--r-- fs/bcachefs/replicas.c | 39
-rw-r--r-- fs/bcachefs/sb-clean.c | 35
-rw-r--r-- fs/bcachefs/sb-counters.c | 20
-rw-r--r-- fs/bcachefs/sb-downgrade.c | 32
-rw-r--r-- fs/bcachefs/sb-errors.c | 2
-rw-r--r-- fs/bcachefs/sb-errors_types.h | 12
-rw-r--r-- fs/bcachefs/sb-members.c | 196
-rw-r--r-- fs/bcachefs/sb-members.h | 186
-rw-r--r-- fs/bcachefs/sb-members_types.h | 21
-rw-r--r-- fs/bcachefs/snapshot.c | 274
-rw-r--r-- fs/bcachefs/snapshot.h | 103
-rw-r--r-- fs/bcachefs/str_hash.h | 70
-rw-r--r-- fs/bcachefs/subvolume.c | 94
-rw-r--r-- fs/bcachefs/subvolume.h | 10
-rw-r--r-- fs/bcachefs/subvolume_types.h | 2
-rw-r--r-- fs/bcachefs/super-io.c | 182
-rw-r--r-- fs/bcachefs/super-io.h | 3
-rw-r--r-- fs/bcachefs/super.c | 149
-rw-r--r-- fs/bcachefs/super_types.h | 13
-rw-r--r-- fs/bcachefs/sysfs.c | 195
-rw-r--r-- fs/bcachefs/tests.c | 18
-rw-r--r-- fs/bcachefs/thread_with_file.c | 15
-rw-r--r-- fs/bcachefs/thread_with_file.h | 3
-rw-r--r-- fs/bcachefs/trace.h | 97
-rw-r--r-- fs/bcachefs/util.c | 204
-rw-r--r-- fs/bcachefs/util.h | 22
-rw-r--r-- fs/bcachefs/xattr.c | 47
-rw-r--r-- fs/bcachefs/xattr.h | 2
-rw-r--r-- fs/binfmt_elf.c | 12
-rw-r--r-- fs/binfmt_elf_fdpic.c | 87
-rw-r--r-- fs/btrfs/backref.c | 60
-rw-r--r-- fs/btrfs/block-group.c | 3
-rw-r--r-- fs/btrfs/block-rsv.c | 11
-rw-r--r-- fs/btrfs/btrfs_inode.h | 10
-rw-r--r-- fs/btrfs/compression.c | 119
-rw-r--r-- fs/btrfs/compression.h | 42
-rw-r--r-- fs/btrfs/ctree.c | 51
-rw-r--r-- fs/btrfs/defrag.c | 2
-rw-r--r-- fs/btrfs/delayed-inode.c | 5
-rw-r--r-- fs/btrfs/delayed-ref.c | 345
-rw-r--r-- fs/btrfs/delayed-ref.h | 148
-rw-r--r-- fs/btrfs/disk-io.c | 157
-rw-r--r-- fs/btrfs/export.c | 8
-rw-r--r-- fs/btrfs/extent-io-tree.c | 58
-rw-r--r-- fs/btrfs/extent-tree.c | 372
-rw-r--r-- fs/btrfs/extent_io.c | 255
-rw-r--r-- fs/btrfs/extent_io.h | 11
-rw-r--r-- fs/btrfs/extent_map.c | 334
-rw-r--r-- fs/btrfs/extent_map.h | 67
-rw-r--r-- fs/btrfs/file-item.c | 88
-rw-r--r-- fs/btrfs/file-item.h | 3
-rw-r--r-- fs/btrfs/file.c | 331
-rw-r--r-- fs/btrfs/free-space-cache.c | 8
-rw-r--r-- fs/btrfs/fs.h | 5
-rw-r--r-- fs/btrfs/inode-item.c | 16
-rw-r--r-- fs/btrfs/inode.c | 954
-rw-r--r-- fs/btrfs/ioctl.c | 156
-rw-r--r-- fs/btrfs/locking.c | 26
-rw-r--r-- fs/btrfs/locking.h | 18
-rw-r--r-- fs/btrfs/lzo.c | 89
-rw-r--r-- fs/btrfs/messages.c | 2
-rw-r--r-- fs/btrfs/ordered-data.c | 9
-rw-r--r-- fs/btrfs/ordered-data.h | 1
-rw-r--r-- fs/btrfs/props.c | 2
-rw-r--r-- fs/btrfs/qgroup.c | 104
-rw-r--r-- fs/btrfs/raid56.c | 3
-rw-r--r-- fs/btrfs/ref-verify.c | 8
-rw-r--r-- fs/btrfs/reflink.c | 56
-rw-r--r-- fs/btrfs/relocation.c | 415
-rw-r--r-- fs/btrfs/root-tree.c | 13
-rw-r--r-- fs/btrfs/root-tree.h | 2
-rw-r--r-- fs/btrfs/scrub.c | 30
-rw-r--r-- fs/btrfs/send.c | 74
-rw-r--r-- fs/btrfs/super.c | 33
-rw-r--r-- fs/btrfs/sysfs.c | 8
-rw-r--r-- fs/btrfs/tests/btrfs-tests.c | 3
-rw-r--r-- fs/btrfs/tests/extent-map-tests.c | 221
-rw-r--r-- fs/btrfs/transaction.c | 95
-rw-r--r-- fs/btrfs/tree-checker.c | 32
-rw-r--r-- fs/btrfs/tree-checker.h | 1
-rw-r--r-- fs/btrfs/tree-log.c | 46
-rw-r--r-- fs/btrfs/tree-mod-log.c | 2
-rw-r--r-- fs/btrfs/volumes.c | 43
-rw-r--r-- fs/btrfs/volumes.h | 10
-rw-r--r-- fs/btrfs/xattr.c | 10
-rw-r--r-- fs/btrfs/zlib.c | 112
-rw-r--r-- fs/btrfs/zoned.c | 14
-rw-r--r-- fs/btrfs/zstd.c | 80
-rw-r--r-- fs/buffer.c | 165
-rw-r--r-- fs/cachefiles/io.c | 76
-rw-r--r-- fs/ceph/addr.c | 28
-rw-r--r-- fs/ceph/caps.c | 4
-rw-r--r-- fs/ceph/inode.c | 2
-rw-r--r-- fs/ceph/mds_client.c | 9
-rw-r--r-- fs/ceph/mds_client.h | 3
-rw-r--r-- fs/coredump.c | 17
-rw-r--r-- fs/cramfs/inode.c | 2
-rw-r--r-- fs/crypto/hooks.c | 32
-rw-r--r-- fs/crypto/inline_crypt.c | 6
-rw-r--r-- fs/dax.c | 14
-rw-r--r-- fs/dcache.c | 2
-rw-r--r-- fs/debugfs/inode.c | 198
-rw-r--r-- fs/direct-io.c | 1
-rw-r--r-- fs/dlm/ast.c | 218
-rw-r--r-- fs/dlm/ast.h | 13
-rw-r--r-- fs/dlm/config.c | 8
-rw-r--r-- fs/dlm/config.h | 2
-rw-r--r-- fs/dlm/debug_fs.c | 327
-rw-r--r-- fs/dlm/dir.c | 157
-rw-r--r-- fs/dlm/dir.h | 3
-rw-r--r-- fs/dlm/dlm_internal.h | 129
-rw-r--r-- fs/dlm/lock.c | 1068
-rw-r--r-- fs/dlm/lock.h | 12
-rw-r--r-- fs/dlm/lockspace.c | 212
-rw-r--r-- fs/dlm/lowcomms.c | 62
-rw-r--r-- fs/dlm/lowcomms.h | 5
-rw-r--r-- fs/dlm/member.c | 25
-rw-r--r-- fs/dlm/memory.c | 18
-rw-r--r-- fs/dlm/memory.h | 4
-rw-r--r-- fs/dlm/midcomms.c | 67
-rw-r--r-- fs/dlm/midcomms.h | 3
-rw-r--r-- fs/dlm/rcom.c | 33
-rw-r--r-- fs/dlm/recover.c | 149
-rw-r--r-- fs/dlm/recover.h | 10
-rw-r--r-- fs/dlm/recoverd.c | 142
-rw-r--r-- fs/dlm/requestqueue.c | 43
-rw-r--r-- fs/dlm/user.c | 135
-rw-r--r-- fs/ecryptfs/crypto.c | 4
-rw-r--r-- fs/ecryptfs/keystore.c | 4
-rw-r--r-- fs/ecryptfs/main.c | 26
-rw-r--r-- fs/efivarfs/internal.h | 5
-rw-r--r-- fs/efivarfs/vars.c | 5
-rw-r--r-- fs/erofs/Kconfig | 15
-rw-r--r-- fs/erofs/Makefile | 5
-rw-r--r-- fs/erofs/compress.h | 4
-rw-r--r-- fs/erofs/decompressor.c | 15
-rw-r--r-- fs/erofs/decompressor_zstd.c | 279
-rw-r--r-- fs/erofs/erofs_fs.h | 15
-rw-r--r-- fs/erofs/fscache.c | 2
-rw-r--r-- fs/erofs/internal.h | 35
-rw-r--r-- fs/erofs/pcpubuf.c | 148
-rw-r--r-- fs/erofs/super.c | 153
-rw-r--r-- fs/erofs/zmap.c | 24
-rw-r--r-- fs/erofs/zutil.c (renamed from fs/erofs/utils.c) | 206
-rw-r--r-- fs/eventpoll.c | 38
-rw-r--r-- fs/exec.c | 20
-rw-r--r-- fs/exfat/dir.c | 2
-rw-r--r-- fs/exfat/file.c | 7
-rw-r--r-- fs/ext4/acl.h | 5
-rw-r--r-- fs/ext4/ext4.h | 9
-rw-r--r-- fs/ext4/extents.c | 3
-rw-r--r-- fs/ext4/file.c | 9
-rw-r--r-- fs/ext4/inode.c | 11
-rw-r--r-- fs/ext4/ioctl.c | 3
-rw-r--r-- fs/ext4/mballoc-test.c | 76
-rw-r--r-- fs/ext4/mballoc.c | 322
-rw-r--r-- fs/ext4/mballoc.h | 14
-rw-r--r-- fs/ext4/move_extent.c | 4
-rw-r--r-- fs/ext4/namei.c | 2
-rw-r--r-- fs/ext4/page-io.c | 3
-rw-r--r-- fs/ext4/readpage.c | 1
-rw-r--r-- fs/ext4/super.c | 48
-rw-r--r-- fs/ext4/sysfs.c | 174
-rw-r--r-- fs/ext4/xattr.c | 145
-rw-r--r-- fs/f2fs/data.c | 5
-rw-r--r-- fs/f2fs/file.c | 3
-rw-r--r-- fs/f2fs/super.c | 2
-rw-r--r-- fs/fat/dir.c | 12
-rw-r--r-- fs/fcntl.c | 20
-rw-r--r-- fs/fhandle.c | 6
-rw-r--r-- fs/freevxfs/vxfs_super.c | 69
-rw-r--r-- fs/fs-writeback.c | 57
-rw-r--r-- fs/fuse/cuse.c | 4
-rw-r--r-- fs/fuse/dir.c | 1
-rw-r--r-- fs/fuse/file.c | 12
-rw-r--r-- fs/fuse/fuse_i.h | 7
-rw-r--r-- fs/fuse/inode.c | 1
-rw-r--r-- fs/fuse/iomode.c | 60
-rw-r--r-- fs/fuse/passthrough.c | 2
-rw-r--r-- fs/fuse/virtio_fs.c | 2
-rw-r--r-- fs/gfs2/aops.c | 4
-rw-r--r-- fs/gfs2/bmap.c | 7
-rw-r--r-- fs/gfs2/dir.c | 31
-rw-r--r-- fs/gfs2/file.c | 59
-rw-r--r-- fs/gfs2/glock.c | 192
-rw-r--r-- fs/gfs2/glock.h | 3
-rw-r--r-- fs/gfs2/glops.c | 37
-rw-r--r-- fs/gfs2/incore.h | 1
-rw-r--r-- fs/gfs2/lock_dlm.c | 40
-rw-r--r-- fs/gfs2/log.c | 5
-rw-r--r-- fs/gfs2/meta_io.c | 16
-rw-r--r-- fs/gfs2/ops_fstype.c | 49
-rw-r--r-- fs/gfs2/rgrp.c | 10
-rw-r--r-- fs/gfs2/super.c | 28
-rw-r--r-- fs/gfs2/sys.c | 4
-rw-r--r-- fs/gfs2/util.c | 63
-rw-r--r-- fs/gfs2/util.h | 6
-rw-r--r-- fs/gfs2/xattr.c | 28
-rw-r--r-- fs/hfsplus/xattr.c | 22
-rw-r--r-- fs/hugetlbfs/inode.c | 16
-rw-r--r-- fs/ioctl.c | 7
-rw-r--r-- fs/iomap/Makefile | 2
-rw-r--r-- fs/iomap/buffered-io.c | 119
-rw-r--r-- fs/jbd2/checkpoint.c | 24
-rw-r--r-- fs/jbd2/commit.c | 3
-rw-r--r-- fs/jffs2/xattr.c | 3
-rw-r--r-- fs/jfs/jfs_logmgr.c | 4
-rw-r--r-- fs/kernfs/file.c | 9
-rw-r--r-- fs/libfs.c | 55
-rw-r--r-- fs/lockd/host.c | 1
-rw-r--r-- fs/minix/inode.c | 48
-rw-r--r-- fs/namei.c | 26
-rw-r--r-- fs/netfs/Makefile | 3
-rw-r--r-- fs/netfs/buffered_read.c | 40
-rw-r--r-- fs/netfs/buffered_write.c | 850
-rw-r--r-- fs/netfs/direct_read.c | 3
-rw-r--r-- fs/netfs/direct_write.c | 56
-rw-r--r-- fs/netfs/fscache_io.c | 14
-rw-r--r-- fs/netfs/internal.h | 55
-rw-r--r-- fs/netfs/io.c | 162
-rw-r--r-- fs/netfs/main.c | 55
-rw-r--r-- fs/netfs/misc.c | 10
-rw-r--r-- fs/netfs/objects.c | 81
-rw-r--r-- fs/netfs/output.c | 478
-rw-r--r-- fs/netfs/stats.c | 17
-rw-r--r-- fs/netfs/write_collect.c | 808
-rw-r--r-- fs/netfs/write_issue.c | 684
-rw-r--r-- fs/nfs/file.c | 8
-rw-r--r-- fs/nfs/fscache.h | 6
-rw-r--r-- fs/nfs/inode.c | 7
-rw-r--r-- fs/nfs/iostat.h | 5
-rw-r--r-- fs/nfs/write.c | 4
-rw-r--r-- fs/nfsd/export.c | 16
-rw-r--r-- fs/nfsd/netlink.c | 66
-rw-r--r-- fs/nfsd/netlink.h | 10
-rw-r--r-- fs/nfsd/netns.h | 1
-rw-r--r-- fs/nfsd/nfs4callback.c | 53
-rw-r--r-- fs/nfsd/nfs4proc.c | 79
-rw-r--r-- fs/nfsd/nfs4state.c | 231
-rw-r--r-- fs/nfsd/nfs4xdr.c | 132
-rw-r--r-- fs/nfsd/nfsctl.c | 526
-rw-r--r-- fs/nfsd/nfsd.h | 3
-rw-r--r-- fs/nfsd/nfsfh.c | 4
-rw-r--r-- fs/nfsd/nfssvc.c | 11
-rw-r--r-- fs/nfsd/state.h | 8
-rw-r--r-- fs/nfsd/stats.c | 42
-rw-r--r-- fs/nfsd/stats.h | 5
-rw-r--r-- fs/nfsd/trace.h | 100
-rw-r--r-- fs/nfsd/vfs.c | 5
-rw-r--r-- fs/nfsd/vfs.h | 8
-rw-r--r-- fs/nfsd/xdr4.h | 24
-rw-r--r-- fs/nilfs2/btree.c | 23
-rw-r--r-- fs/nilfs2/dir.c | 3
-rw-r--r-- fs/nilfs2/gcinode.c | 1
-rw-r--r-- fs/nilfs2/ioctl.c | 2
-rw-r--r-- fs/nilfs2/nilfs.h | 4
-rw-r--r-- fs/nilfs2/recovery.c | 5
-rw-r--r-- fs/nilfs2/segment.c | 8
-rw-r--r-- fs/nilfs2/super.c | 388
-rw-r--r-- fs/nilfs2/the_nilfs.c | 25
-rw-r--r-- fs/nilfs2/the_nilfs.h | 6
-rw-r--r-- fs/ntfs3/Kconfig | 9
-rw-r--r-- fs/ntfs3/bitmap.c | 4
-rw-r--r-- fs/ntfs3/dir.c | 7
-rw-r--r-- fs/ntfs3/file.c | 8
-rw-r--r-- fs/ntfs3/fsntfs.c | 2
-rw-r--r-- fs/ntfs3/index.c | 11
-rw-r--r-- fs/ntfs3/inode.c | 20
-rw-r--r-- fs/ntfs3/ntfs_fs.h | 8
-rw-r--r-- fs/ntfs3/super.c | 67
-rw-r--r-- fs/ocfs2/aops.c | 2
-rw-r--r-- fs/ocfs2/cluster/tcp.c | 5
-rw-r--r-- fs/ocfs2/dlm/dlmdomain.c | 12
-rw-r--r-- fs/ocfs2/export.c | 12
-rw-r--r-- fs/ocfs2/file.c | 2
-rw-r--r-- fs/ocfs2/inode.c | 2
-rw-r--r-- fs/ocfs2/ioctl.c | 1
-rw-r--r-- fs/ocfs2/localalloc.c | 34
-rw-r--r-- fs/ocfs2/move_extents.c | 2
-rw-r--r-- fs/ocfs2/namei.c | 4
-rw-r--r-- fs/ocfs2/ocfs2_fs.h | 3
-rw-r--r-- fs/ocfs2/refcounttree.c | 2
-rw-r--r-- fs/ocfs2/reservations.c | 2
-rw-r--r-- fs/ocfs2/resize.c | 8
-rw-r--r-- fs/ocfs2/suballoc.c | 117
-rw-r--r-- fs/ocfs2/suballoc.h | 6
-rw-r--r-- fs/openpromfs/inode.c | 8
-rw-r--r-- fs/orangefs/dcache.c | 4
-rw-r--r-- fs/orangefs/namei.c | 26
-rw-r--r-- fs/orangefs/super.c | 20
-rw-r--r-- fs/overlayfs/copy_up.c | 2
-rw-r--r-- fs/overlayfs/params.c | 4
-rw-r--r-- fs/overlayfs/super.c | 2
-rw-r--r-- fs/proc/Makefile | 2
-rw-r--r-- fs/proc/bootconfig.c | 12
-rw-r--r-- fs/proc/fd.c | 42
-rw-r--r-- fs/proc/inode.c | 10
-rw-r--r-- fs/proc/meminfo.c | 3
-rw-r--r-- fs/proc/page.c | 76
-rw-r--r-- fs/proc/proc_net.c | 1
-rw-r--r-- fs/proc/proc_sysctl.c | 21
-rw-r--r-- fs/proc/task_mmu.c | 117
-rw-r--r-- fs/proc/vmcore.c | 7
-rw-r--r-- fs/qnx6/inode.c | 117
-rw-r--r-- fs/ramfs/file-mmu.c | 2
-rw-r--r-- fs/read_write.c | 2
-rw-r--r-- fs/reiserfs/item_ops.c | 13
-rw-r--r-- fs/reiserfs/journal.c | 2
-rw-r--r-- fs/romfs/super.c | 2
-rw-r--r-- fs/seq_file.c | 13
-rw-r--r-- fs/signalfd.c | 44
-rw-r--r-- fs/smb/client/Kconfig | 1
-rw-r--r-- fs/smb/client/cached_dir.c | 6
-rw-r--r-- fs/smb/client/cifs_debug.c | 6
-rw-r--r-- fs/smb/client/cifsfs.c | 138
-rw-r--r-- fs/smb/client/cifsfs.h | 11
-rw-r--r-- fs/smb/client/cifsglob.h | 89
-rw-r--r-- fs/smb/client/cifspdu.h | 4
-rw-r--r-- fs/smb/client/cifsproto.h | 41
-rw-r--r-- fs/smb/client/cifssmb.c | 126
-rw-r--r-- fs/smb/client/connect.c | 180
-rw-r--r-- fs/smb/client/dfs.c | 51
-rw-r--r-- fs/smb/client/dfs.h | 33
-rw-r--r-- fs/smb/client/dfs_cache.c | 53
-rw-r--r-- fs/smb/client/dir.c | 22
-rw-r--r-- fs/smb/client/file.c | 2855
-rw-r--r-- fs/smb/client/fs_context.c | 39
-rw-r--r-- fs/smb/client/fs_context.h | 16
-rw-r--r-- fs/smb/client/fscache.c | 145
-rw-r--r-- fs/smb/client/fscache.h | 54
-rw-r--r-- fs/smb/client/inode.c | 51
-rw-r--r-- fs/smb/client/ioctl.c | 6
-rw-r--r-- fs/smb/client/misc.c | 20
-rw-r--r-- fs/smb/client/smb1ops.c | 4
-rw-r--r-- fs/smb/client/smb2misc.c | 14
-rw-r--r-- fs/smb/client/smb2ops.c | 124
-rw-r--r-- fs/smb/client/smb2pdu.c | 207
-rw-r--r-- fs/smb/client/smb2pdu.h | 14
-rw-r--r-- fs/smb/client/smb2proto.h | 5
-rw-r--r-- fs/smb/client/smb2transport.c | 4
-rw-r--r-- fs/smb/client/trace.h | 240
-rw-r--r-- fs/smb/client/transport.c | 24
-rw-r--r-- fs/smb/common/smb2pdu.h | 35
-rw-r--r-- fs/smb/server/ksmbd_netlink.h | 38
-rw-r--r-- fs/smb/server/mgmt/share_config.c | 7
-rw-r--r-- fs/smb/server/oplock.c | 65
-rw-r--r-- fs/smb/server/server.c | 13
-rw-r--r-- fs/smb/server/smb2ops.c | 10
-rw-r--r-- fs/smb/server/smb2pdu.c | 24
-rw-r--r-- fs/smb/server/smb2pdu.h | 18
-rw-r--r-- fs/smb/server/smb_common.c | 4
-rw-r--r-- fs/smb/server/transport_ipc.c | 37
-rw-r--r-- fs/smb/server/transport_tcp.c | 4
-rw-r--r-- fs/smb/server/vfs.c | 5
-rw-r--r-- fs/smb/server/vfs_cache.c | 28
-rw-r--r-- fs/smb/server/vfs_cache.h | 2
-rw-r--r-- fs/squashfs/file.c | 6
-rw-r--r-- fs/squashfs/file_direct.c | 3
-rw-r--r-- fs/squashfs/inode.c | 5
-rw-r--r-- fs/squashfs/namei.c | 14
-rw-r--r-- fs/squashfs/symlink.c | 35
-rw-r--r-- fs/stat.c | 1
-rw-r--r-- fs/super.c | 24
-rw-r--r-- fs/sysfs/file.c | 2
-rw-r--r-- fs/timerfd.c | 36
-rw-r--r-- fs/tracefs/event_inode.c | 169
-rw-r--r-- fs/tracefs/inode.c | 288
-rw-r--r-- fs/tracefs/internal.h | 14
-rw-r--r-- fs/unicode/Makefile | 14
-rw-r--r-- fs/userfaultfd.c | 53
-rw-r--r-- fs/vboxsf/file.c | 1
-rw-r--r-- fs/vboxsf/super.c | 9
-rw-r--r-- fs/vboxsf/utils.c | 3
-rw-r--r-- fs/verity/init.c | 7
-rw-r--r-- fs/xfs/Makefile | 4
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c | 40
-rw-r--r-- fs/xfs/libxfs/xfs_sb.h | 5
-rw-r--r-- fs/xfs/scrub/common.c | 4
-rw-r--r-- fs/xfs/xfs_aops.c | 7
-rw-r--r-- fs/xfs/xfs_buf.c | 2
-rw-r--r-- fs/xfs/xfs_file.c | 10
-rw-r--r-- fs/xfs/xfs_icache.c | 8
-rw-r--r-- fs/xfs/xfs_inode.c | 15
-rw-r--r-- fs/xfs/xfs_super.c | 6
-rw-r--r-- fs/xfs/xfs_trans.h | 9
-rw-r--r-- fs/zonefs/super.c | 2
508 files changed, 20385 insertions(+), 17235 deletions(-)
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index 29281b7c3887..0d6138bee2a3 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -49,9 +49,6 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags,
unsigned int s_cache, unsigned int f_flags)
{
- if (fid->qid.type != P9_QTFILE)
- return;
-
if ((!s_cache) ||
((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) ||
(s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) {
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 9defa12208f9..1775fcc7f0e8 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -179,13 +179,14 @@ extern int v9fs_vfs_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags);
-extern struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid);
+extern struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid,
+ bool new);
extern const struct inode_operations v9fs_dir_inode_operations_dotl;
extern const struct inode_operations v9fs_file_inode_operations_dotl;
extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
extern const struct netfs_request_ops v9fs_req_ops;
extern struct inode *v9fs_fid_iget_dotl(struct super_block *sb,
- struct p9_fid *fid);
+ struct p9_fid *fid, bool new);
/* other default globals */
#define V9FS_PORT 564
@@ -224,12 +225,12 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
*/
static inline struct inode *
v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
- struct super_block *sb)
+ struct super_block *sb, bool new)
{
if (v9fs_proto_dotl(v9ses))
- return v9fs_fid_iget_dotl(sb, fid);
+ return v9fs_fid_iget_dotl(sb, fid, new);
else
- return v9fs_fid_iget(sb, fid);
+ return v9fs_fid_iget(sb, fid, new);
}
#endif
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 047855033d32..a97ceb105cd8 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -26,36 +26,38 @@
#include "cache.h"
#include "fid.h"
-static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
+/*
+ * Writeback calls this when it finds a folio that needs uploading. This isn't
+ * called if writeback only has copy-to-cache to deal with.
+ */
+static void v9fs_begin_writeback(struct netfs_io_request *wreq)
{
- struct p9_fid *fid = subreq->rreq->netfs_priv;
- int err, len;
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
- netfs_write_subrequest_terminated(subreq, len ?: err, false);
-}
+ struct p9_fid *fid;
-static void v9fs_upload_to_server_worker(struct work_struct *work)
-{
- struct netfs_io_subrequest *subreq =
- container_of(work, struct netfs_io_subrequest, work);
+ fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
+ if (!fid) {
+ WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
+ wreq->inode->i_ino);
+ return;
+ }
- v9fs_upload_to_server(subreq);
+ wreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
+ if (fid->iounit)
+ wreq->wsize = min(wreq->wsize, fid->iounit);
+ wreq->netfs_priv = fid;
+ wreq->io_streams[0].avail = true;
}
/*
- * Set up write requests for a writeback slice. We need to add a write request
- * for each write we want to make.
+ * Issue a subrequest to write to the server.
*/
-static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
+static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
{
- struct netfs_io_subrequest *subreq;
+ struct p9_fid *fid = subreq->rreq->netfs_priv;
+ int err, len;
- subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
- start, len, v9fs_upload_to_server_worker);
- if (subreq)
- netfs_queue_write_request(subreq);
+ len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+ netfs_write_subrequest_terminated(subreq, len ?: err, false);
}
/**
@@ -87,12 +89,16 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
{
struct p9_fid *fid;
bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
- rreq->origin == NETFS_WRITEBACK ||
rreq->origin == NETFS_WRITETHROUGH ||
- rreq->origin == NETFS_LAUNDER_WRITE ||
rreq->origin == NETFS_UNBUFFERED_WRITE ||
rreq->origin == NETFS_DIO_WRITE);
+ if (rreq->origin == NETFS_WRITEBACK)
+ return 0; /* We don't get the write handle until we find we
+ * have actually dirty data and not just
+ * copy-to-cache data.
+ */
+
if (file) {
fid = file->private_data;
if (!fid)
@@ -104,6 +110,10 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
goto no_fid;
}
+ rreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
+ if (fid->iounit)
+ rreq->wsize = min(rreq->wsize, fid->iounit);
+
/* we might need to read from a fid that was opened write-only
* for read-modify-write of page cache, use the writeback fid
* for that */
@@ -132,7 +142,8 @@ const struct netfs_request_ops v9fs_req_ops = {
.init_request = v9fs_init_request,
.free_request = v9fs_free_request,
.issue_read = v9fs_issue_read,
- .create_write_requests = v9fs_create_write_requests,
+ .begin_writeback = v9fs_begin_writeback,
+ .issue_write = v9fs_issue_write,
};
const struct address_space_operations v9fs_addr_operations = {
@@ -141,7 +152,6 @@ const struct address_space_operations v9fs_addr_operations = {
.dirty_folio = netfs_dirty_folio,
.release_folio = netfs_release_folio,
.invalidate_folio = netfs_invalidate_folio,
- .launder_folio = netfs_launder_folio,
.direct_IO = noop_direct_IO,
.writepages = netfs_writepages,
};
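[Editor's note: the hunks above cap the request write size by the 9p transport limits: a write payload cannot exceed the negotiated msize minus the 9p I/O header, further clamped by the server's per-fid iounit when one is advertised. A standalone sketch of that calculation, with illustrative values (P9_IOHDRSZ is 24 in the kernel's 9p headers):

#include <stdio.h>

#define P9_IOHDRSZ 24	/* 9p Twrite header overhead (see include/net/9p/9p.h) */

/* Mirrors the wsize computation in v9fs_begin_writeback()/v9fs_init_request(). */
static unsigned int v9fs_max_write(unsigned int msize, unsigned int iounit)
{
	unsigned int wsize = msize - P9_IOHDRSZ;

	if (iounit && iounit < wsize)	/* iounit == 0: no per-fid limit */
		wsize = iounit;
	return wsize;
}

int main(void)
{
	printf("%u\n", v9fs_max_write(512 * 1024, 0));		/* 524264 */
	printf("%u\n", v9fs_max_write(512 * 1024, 8192));	/* 8192 */
	return 0;
}
]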
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index abdbbaee5184..348cc90bf9c5 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -520,6 +520,7 @@ const struct file_operations v9fs_file_operations = {
.splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync,
+ .setlease = simple_nosetlease,
};
const struct file_operations v9fs_file_operations_dotl = {
@@ -534,4 +535,5 @@ const struct file_operations v9fs_file_operations_dotl = {
.splice_read = v9fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fsync = v9fs_file_fsync_dotl,
+ .setlease = simple_nosetlease,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 360a5304ec03..7a3308d77606 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -83,7 +83,7 @@ static int p9mode2perm(struct v9fs_session_info *v9ses,
int res;
int mode = stat->mode;
- res = mode & S_IALLUGO;
+ res = mode & 0777; /* S_IRWXUGO */
if (v9fs_proto_dotu(v9ses)) {
if ((mode & P9_DMSETUID) == P9_DMSETUID)
res |= S_ISUID;
@@ -178,6 +178,9 @@ int v9fs_uflags2omode(int uflags, int extended)
break;
}
+ if (uflags & O_TRUNC)
+ ret |= P9_OTRUNC;
+
if (extended) {
if (uflags & O_EXCL)
ret |= P9_OEXCL;
@@ -344,20 +347,25 @@ void v9fs_evict_inode(struct inode *inode)
struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
__le32 __maybe_unused version;
- truncate_inode_pages_final(&inode->i_data);
+ if (!is_bad_inode(inode)) {
+ truncate_inode_pages_final(&inode->i_data);
- version = cpu_to_le32(v9inode->qid.version);
- netfs_clear_inode_writeback(inode, &version);
+ version = cpu_to_le32(v9inode->qid.version);
+ netfs_clear_inode_writeback(inode, &version);
- clear_inode(inode);
- filemap_fdatawrite(&inode->i_data);
+ clear_inode(inode);
+ filemap_fdatawrite(&inode->i_data);
#ifdef CONFIG_9P_FSCACHE
- fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
+ if (v9fs_inode_cookie(v9inode))
+ fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
#endif
+ } else
+ clear_inode(inode);
}
-struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
+struct inode *
+v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid, bool new)
{
dev_t rdev;
int retval;
@@ -369,8 +377,18 @@ struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
inode = iget_locked(sb, QID2INO(&fid->qid));
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
+ if (!(inode->i_state & I_NEW)) {
+ if (!new) {
+ goto done;
+ } else {
+ p9_debug(P9_DEBUG_VFS, "WARNING: Inode collision %ld\n",
+ inode->i_ino);
+ iput(inode);
+ remove_inode_hash(inode);
+ inode = iget_locked(sb, QID2INO(&fid->qid));
+ WARN_ON(!(inode->i_state & I_NEW));
+ }
+ }
/*
* initialize the inode with the stat info
@@ -394,11 +412,11 @@ struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
unlock_new_inode(inode);
+done:
return inode;
error:
iget_failed(inode);
return ERR_PTR(retval);
-
}
/**
@@ -430,8 +448,15 @@ static int v9fs_at_to_dotl_flags(int flags)
*/
static void v9fs_dec_count(struct inode *inode)
{
- if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
- drop_nlink(inode);
+ if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) {
+ if (inode->i_nlink) {
+ drop_nlink(inode);
+ } else {
+ p9_debug(P9_DEBUG_VFS,
+ "WARNING: unexpected i_nlink zero %d inode %ld\n",
+ inode->i_nlink, inode->i_ino);
+ }
+ }
}
/**
@@ -482,6 +507,9 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
} else
v9fs_dec_count(inode);
+ if (inode->i_nlink <= 0) /* no more refs unhash it */
+ remove_inode_hash(inode);
+
v9fs_invalidate_inode_attr(inode);
v9fs_invalidate_inode_attr(dir);
@@ -547,7 +575,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
/*
* instantiate inode and assign the unopened fid to the dentry
*/
- inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS,
@@ -676,7 +704,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
else if (IS_ERR(fid))
inode = ERR_CAST(fid);
else
- inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb, false);
/*
* If we had a rename on the server and a parallel lookup
* for the new name, then make sure we instantiate with
@@ -1057,8 +1085,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
struct v9fs_session_info *v9ses = sb->s_fs_info;
struct v9fs_inode *v9inode = V9FS_I(inode);
- set_nlink(inode, 1);
-
inode_set_atime(inode, stat->atime, 0);
inode_set_mtime(inode, stat->mtime, 0);
inode_set_ctime(inode, stat->mtime, 0);
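[Editor's note: both iget paths above rely on the iget_locked() contract: an inode returned with I_NEW set is freshly allocated and must be initialized and released with unlock_new_inode(), while one without I_NEW is already live in the inode cache, which is exactly the case the new collision handling unhashes and retries. The usual pattern, for reference (simplified sketch, not from this patch):

static struct inode *example_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* cache hit: already initialized */

	/* ... fill in i_mode, i_size, etc. from the server ... */

	unlock_new_inode(inode);	/* clears I_NEW, wakes concurrent lookups */
	return inode;
}
]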
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index ef9db3e03506..c61b97bd13b9 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -52,7 +52,10 @@ static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
return current_fsgid();
}
-struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
+
+
+struct inode *
+v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid, bool new)
{
int retval;
struct inode *inode;
@@ -62,8 +65,18 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
inode = iget_locked(sb, QID2INO(&fid->qid));
if (unlikely(!inode))
return ERR_PTR(-ENOMEM);
- if (!(inode->i_state & I_NEW))
- return inode;
+ if (!(inode->i_state & I_NEW)) {
+ if (!new) {
+ goto done;
+ } else { /* deal with race condition in inode number reuse */
+ p9_debug(P9_DEBUG_ERROR, "WARNING: Inode collision %lx\n",
+ inode->i_ino);
+ iput(inode);
+ remove_inode_hash(inode);
+ inode = iget_locked(sb, QID2INO(&fid->qid));
+ WARN_ON(!(inode->i_state & I_NEW));
+ }
+ }
/*
* initialize the inode with the stat info
@@ -78,11 +91,11 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
retval = v9fs_init_inode(v9ses, inode, &fid->qid,
st->st_mode, new_decode_dev(st->st_rdev));
+ v9fs_stat2inode_dotl(st, inode, 0);
kfree(st);
if (retval)
goto error;
- v9fs_stat2inode_dotl(st, inode, 0);
v9fs_set_netfs_context(inode);
v9fs_cache_inode_get_cookie(inode);
retval = v9fs_get_acl(inode, fid);
@@ -90,12 +103,11 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
goto error;
unlock_new_inode(inode);
-
+done:
return inode;
error:
iget_failed(inode);
return ERR_PTR(retval);
-
}
struct dotl_openflag_map {
@@ -247,7 +259,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
goto out;
}
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
@@ -297,7 +309,6 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
umode_t omode)
{
int err;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
kgid_t gid;
const unsigned char *name;
@@ -307,7 +318,6 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
- v9ses = v9fs_inode2v9ses(dir);
omode |= S_IFDIR;
if (dir->i_mode & S_ISGID)
@@ -342,7 +352,7 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
}
/* instantiate inode and assign the unopened fid to the dentry */
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
@@ -739,7 +749,6 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
kgid_t gid;
const unsigned char *name;
umode_t mode;
- struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
struct p9_qid qid;
@@ -749,7 +758,6 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
dir->i_ino, dentry, omode,
MAJOR(rdev), MINOR(rdev));
- v9ses = v9fs_inode2v9ses(dir);
dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
@@ -780,7 +788,7 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
err);
goto error;
}
- inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+ inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 4236058c7bbd..f52fdf42945c 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -139,7 +139,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
else
sb->s_d_op = &v9fs_dentry_operations;
- inode = v9fs_get_inode_from_fid(v9ses, fid, sb);
+ inode = v9fs_get_inode_from_fid(v9ses, fid, sb, true);
if (IS_ERR(inode)) {
retval = PTR_ERR(inode);
goto release_sb;
@@ -244,6 +244,21 @@ done:
return res;
}
+static int v9fs_drop_inode(struct inode *inode)
+{
+ struct v9fs_session_info *v9ses;
+
+ v9ses = v9fs_inode2v9ses(inode);
+ if (v9ses->cache & (CACHE_META|CACHE_LOOSE))
+ return generic_drop_inode(inode);
+ /*
+ * in case of non cached mode always drop the
+ * inode because we want the inode attribute
+ * to always match that on the server.
+ */
+ return 1;
+}
+
static int v9fs_write_inode(struct inode *inode,
struct writeback_control *wbc)
{
@@ -268,6 +283,7 @@ static const struct super_operations v9fs_super_ops = {
.alloc_inode = v9fs_alloc_inode,
.free_inode = v9fs_free_inode,
.statfs = simple_statfs,
+ .drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = v9fs_show_options,
.umount_begin = v9fs_umount_begin,
@@ -278,6 +294,7 @@ static const struct super_operations v9fs_super_ops_dotl = {
.alloc_inode = v9fs_alloc_inode,
.free_inode = v9fs_free_inode,
.statfs = v9fs_statfs,
+ .drop_inode = v9fs_drop_inode,
.evict_inode = v9fs_evict_inode,
.show_options = v9fs_show_options,
.umount_begin = v9fs_umount_begin,
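[Editor's note: for context on the new hook, iput_final() consults ->drop_inode when the last reference to an inode goes away, and a nonzero return forces immediate eviction instead of leaving the clean inode cached. Roughly (simplified sketch of the core VFS logic; see fs/inode.c for the real code, which has more conditions):

/* Simplified shape of iput_final(); inode_add_lru()/evict() are internal. */
static void iput_final_sketch(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;
	int drop = op->drop_inode ? op->drop_inode(inode)
				  : generic_drop_inode(inode);

	if (!drop && (inode->i_sb->s_flags & SB_ACTIVE)) {
		inode_add_lru(inode);	/* retain the clean inode in cache */
		return;
	}
	evict(inode);	/* v9fs_drop_inode() returning 1 lands here, so an
			 * uncached mount refetches attributes on next lookup */
}
]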
diff --git a/fs/afs/file.c b/fs/afs/file.c
index ef2cc8f565d2..c3f0c45ae9a9 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -54,7 +54,6 @@ const struct address_space_operations afs_file_aops = {
.read_folio = netfs_read_folio,
.readahead = netfs_readahead,
.dirty_folio = netfs_dirty_folio,
- .launder_folio = netfs_launder_folio,
.release_folio = netfs_release_folio,
.invalidate_folio = netfs_invalidate_folio,
.migrate_folio = filemap_migrate_folio,
@@ -354,7 +353,7 @@ static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
if (file)
rreq->netfs_priv = key_get(afs_file_key(file));
rreq->rsize = 256 * 1024;
- rreq->wsize = 256 * 1024;
+ rreq->wsize = 256 * 1024 * 1024;
return 0;
}
@@ -369,6 +368,7 @@ static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
static void afs_free_request(struct netfs_io_request *rreq)
{
key_put(rreq->netfs_priv);
+ afs_put_wb_key(rreq->netfs_priv2);
}
static void afs_update_i_size(struct inode *inode, loff_t new_i_size)
@@ -400,7 +400,9 @@ const struct netfs_request_ops afs_req_ops = {
.issue_read = afs_issue_read,
.update_i_size = afs_update_i_size,
.invalidate_cache = afs_netfs_invalidate_cache,
- .create_write_requests = afs_create_write_requests,
+ .begin_writeback = afs_begin_writeback,
+ .prepare_write = afs_prepare_write,
+ .issue_write = afs_issue_write,
};
static void afs_add_open_mmap(struct afs_vnode *vnode)
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6ce5a612937c..6e1d3c4daf72 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -916,7 +916,6 @@ struct afs_operation {
loff_t pos;
loff_t size;
loff_t i_size;
- bool laundering; /* Laundering page, PG_writeback not set */
} store;
struct {
struct iattr *attr;
@@ -1599,11 +1598,14 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
/*
* write.c
*/
+void afs_prepare_write(struct netfs_io_subrequest *subreq);
+void afs_issue_write(struct netfs_io_subrequest *subreq);
+void afs_begin_writeback(struct netfs_io_request *wreq);
+void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream);
extern int afs_writepages(struct address_space *, struct writeback_control *);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
extern void afs_prune_wb_keys(struct afs_vnode *);
-void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len);
/*
* xattr.c
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index ed04bd1eeae8..ed09d4d4c211 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -541,11 +541,13 @@ pick_server:
test_bit(AFS_SE_EXCLUDED, &se->flags) ||
!test_bit(AFS_SERVER_FL_RESPONDING, &s->flags))
continue;
- es = op->server_states->endpoint_state;
+ es = op->server_states[i].endpoint_state;
sal = es->addresses;
afs_get_address_preferences_rcu(op->net, sal);
for (j = 0; j < sal->nr_addrs; j++) {
+ if (es->failed_set & (1 << j))
+ continue;
if (!sal->addrs[j].peer)
continue;
if (sal->addrs[j].prio > best_prio) {
@@ -605,6 +607,8 @@ iterate_address:
best_prio = -1;
addr_index = 0;
for (i = 0; i < alist->nr_addrs; i++) {
+ if (!(set & (1 << i)))
+ continue;
if (alist->addrs[i].prio > best_prio) {
addr_index = i;
best_prio = alist->addrs[i].prio;
@@ -674,7 +678,7 @@ no_more_servers:
for (i = 0; i < op->server_list->nr_servers; i++) {
struct afs_endpoint_state *estate;
- estate = op->server_states->endpoint_state;
+ estate = op->server_states[i].endpoint_state;
error = READ_ONCE(estate->error);
if (error < 0)
afs_op_accumulate_error(op, error, estate->abort_code);
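[Editor's note: the two rotate.c hunks above fix an indexing slip: op->server_states points to an array, so op->server_states->endpoint_state dereferenced element 0 on every pass through the loop. A reduced, standalone illustration of the bug class (hypothetical types, not the AFS structures):

#include <assert.h>

struct state { int endpoint; };
struct op { struct state *server_states; };	/* base of an array */

/* Buggy: p->field on an array base is (&p[0])->field, whatever i is */
static int buggy(struct op *op, int i) { (void)i; return op->server_states->endpoint; }
/* Fixed: index the element actually under consideration */
static int fixed(struct op *op, int i) { return op->server_states[i].endpoint; }

int main(void)
{
	struct state s[2] = { { 10 }, { 20 } };
	struct op op = { s };

	assert(buggy(&op, 1) == 10);	/* always element 0 */
	assert(fixed(&op, 1) == 20);
	return 0;
}
]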
diff --git a/fs/afs/validation.c b/fs/afs/validation.c
index 32a53fc8dfb2..bef8af12ebe2 100644
--- a/fs/afs/validation.c
+++ b/fs/afs/validation.c
@@ -365,9 +365,9 @@ static void afs_zap_data(struct afs_vnode *vnode)
* written back in a regular file and completely discard the pages in a
* directory or symlink */
if (S_ISREG(vnode->netfs.inode.i_mode))
- invalidate_remote_inode(&vnode->netfs.inode);
+ filemap_invalidate_inode(&vnode->netfs.inode, true, 0, LLONG_MAX);
else
- invalidate_inode_pages2(vnode->netfs.inode.i_mapping);
+ filemap_invalidate_inode(&vnode->netfs.inode, false, 0, LLONG_MAX);
}
/*
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 74402d95a884..e959640694c2 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -29,43 +29,39 @@ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsign
/*
* Find a key to use for the writeback. We cached the keys used to author the
- * writes on the vnode. *_wbk will contain the last writeback key used or NULL
- * and we need to start from there if it's set.
+ * writes on the vnode. wreq->netfs_priv2 will contain the last writeback key
+ * record used or NULL and we need to start from there if it's set.
+ * wreq->netfs_priv will be set to the key itself or NULL.
*/
-static int afs_get_writeback_key(struct afs_vnode *vnode,
- struct afs_wb_key **_wbk)
+static void afs_get_writeback_key(struct netfs_io_request *wreq)
{
- struct afs_wb_key *wbk = NULL;
- struct list_head *p;
- int ret = -ENOKEY, ret2;
+ struct afs_wb_key *wbk, *old = wreq->netfs_priv2;
+ struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
+
+ key_put(wreq->netfs_priv);
+ wreq->netfs_priv = NULL;
+ wreq->netfs_priv2 = NULL;
spin_lock(&vnode->wb_lock);
- if (*_wbk)
- p = (*_wbk)->vnode_link.next;
+ if (old)
+ wbk = list_next_entry(old, vnode_link);
else
- p = vnode->wb_keys.next;
+ wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link);
- while (p != &vnode->wb_keys) {
- wbk = list_entry(p, struct afs_wb_key, vnode_link);
+ list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) {
_debug("wbk %u", key_serial(wbk->key));
- ret2 = key_validate(wbk->key);
- if (ret2 == 0) {
+ if (key_validate(wbk->key) == 0) {
refcount_inc(&wbk->usage);
+ wreq->netfs_priv = key_get(wbk->key);
+ wreq->netfs_priv2 = wbk;
_debug("USE WB KEY %u", key_serial(wbk->key));
break;
}
-
- wbk = NULL;
- if (ret == -ENOKEY)
- ret = ret2;
- p = p->next;
}
spin_unlock(&vnode->wb_lock);
- if (*_wbk)
- afs_put_wb_key(*_wbk);
- *_wbk = wbk;
- return 0;
+
+ afs_put_wb_key(old);
}
static void afs_store_data_success(struct afs_operation *op)
@@ -75,8 +71,7 @@ static void afs_store_data_success(struct afs_operation *op)
op->ctime = op->file[0].scb.status.mtime_client;
afs_vnode_commit_status(op, &op->file[0]);
if (!afs_op_error(op)) {
- if (!op->store.laundering)
- afs_pages_written_back(vnode, op->store.pos, op->store.size);
+ afs_pages_written_back(vnode, op->store.pos, op->store.size);
afs_stat_v(vnode, n_stores);
atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
}
@@ -89,113 +84,125 @@ static const struct afs_operation_ops afs_store_data_operation = {
};
/*
- * write to a file
+ * Prepare a subrequest to write to the server. This sets the max_len
+ * parameter.
+ */
+void afs_prepare_write(struct netfs_io_subrequest *subreq)
+{
+ //if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
+ // subreq->max_len = 512 * 1024;
+ //else
+ subreq->max_len = 256 * 1024 * 1024;
+}
+
+/*
+ * Issue a subrequest to write to the server.
*/
-static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
- bool laundering)
+static void afs_issue_write_worker(struct work_struct *work)
{
+ struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
+ struct netfs_io_request *wreq = subreq->rreq;
struct afs_operation *op;
- struct afs_wb_key *wbk = NULL;
- loff_t size = iov_iter_count(iter);
+ struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
+ unsigned long long pos = subreq->start + subreq->transferred;
+ size_t len = subreq->len - subreq->transferred;
int ret = -ENOKEY;
- _enter("%s{%llx:%llu.%u},%llx,%llx",
+ _enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx",
+ wreq->debug_id, subreq->debug_index,
vnode->volume->name,
vnode->fid.vid,
vnode->fid.vnode,
vnode->fid.unique,
- size, pos);
+ pos, len);
- ret = afs_get_writeback_key(vnode, &wbk);
- if (ret) {
- _leave(" = %d [no keys]", ret);
- return ret;
- }
+#if 0 // Error injection
+ if (subreq->debug_index == 3)
+ return netfs_write_subrequest_terminated(subreq, -ENOANO, false);
- op = afs_alloc_operation(wbk->key, vnode->volume);
- if (IS_ERR(op)) {
- afs_put_wb_key(wbk);
- return -ENOMEM;
+ if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
+ set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
}
+#endif
+
+ op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
+ if (IS_ERR(op))
+ return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
afs_op_set_vnode(op, 0, vnode);
- op->file[0].dv_delta = 1;
+ op->file[0].dv_delta = 1;
op->file[0].modification = true;
- op->store.pos = pos;
- op->store.size = size;
- op->store.laundering = laundering;
- op->flags |= AFS_OPERATION_UNINTR;
- op->ops = &afs_store_data_operation;
+ op->store.pos = pos;
+ op->store.size = len;
+ op->flags |= AFS_OPERATION_UNINTR;
+ op->ops = &afs_store_data_operation;
-try_next_key:
afs_begin_vnode_operation(op);
- op->store.write_iter = iter;
- op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
- op->mtime = inode_get_mtime(&vnode->netfs.inode);
+ op->store.write_iter = &subreq->io_iter;
+ op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size);
+ op->mtime = inode_get_mtime(&vnode->netfs.inode);
afs_wait_for_operation(op);
-
- switch (afs_op_error(op)) {
+ ret = afs_put_operation(op);
+ switch (ret) {
case -EACCES:
case -EPERM:
case -ENOKEY:
case -EKEYEXPIRED:
case -EKEYREJECTED:
case -EKEYREVOKED:
- _debug("next");
-
- ret = afs_get_writeback_key(vnode, &wbk);
- if (ret == 0) {
- key_put(op->key);
- op->key = key_get(wbk->key);
- goto try_next_key;
- }
+ /* If there are more keys we can try, use the retry algorithm
+ * to rotate the keys.
+ */
+ if (wreq->netfs_priv2)
+ set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
break;
}
- afs_put_wb_key(wbk);
- _leave(" = %d", afs_op_error(op));
- return afs_put_operation(op);
+ netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
}
-static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
+void afs_issue_write(struct netfs_io_subrequest *subreq)
{
- struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
- ssize_t ret;
-
- _enter("%x[%x],%zx",
- subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count);
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
- ret = afs_store_data(vnode, &subreq->io_iter, subreq->start,
- subreq->rreq->origin == NETFS_LAUNDER_WRITE);
- netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len,
- false);
+ subreq->work.func = afs_issue_write_worker;
+ if (!queue_work(system_unbound_wq, &subreq->work))
+ WARN_ON_ONCE(1);
}
-static void afs_upload_to_server_worker(struct work_struct *work)
+/*
+ * Writeback calls this when it finds a folio that needs uploading. This isn't
+ * called if writeback only has copy-to-cache to deal with.
+ */
+void afs_begin_writeback(struct netfs_io_request *wreq)
{
- struct netfs_io_subrequest *subreq =
- container_of(work, struct netfs_io_subrequest, work);
-
- afs_upload_to_server(subreq);
+ afs_get_writeback_key(wreq);
+ wreq->io_streams[0].avail = true;
}
/*
- * Set up write requests for a writeback slice. We need to add a write request
- * for each write we want to make.
+ * Prepare to retry the writes in request. Use this to try rotating the
+ * available writeback keys.
*/
-void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
+void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream)
{
- struct netfs_io_subrequest *subreq;
-
- _enter("%x,%llx-%llx", wreq->debug_id, start, start + len);
+ struct netfs_io_subrequest *subreq =
+ list_first_entry(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
- subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
- start, len, afs_upload_to_server_worker);
- if (subreq)
- netfs_queue_write_request(subreq);
+ switch (subreq->error) {
+ case -EACCES:
+ case -EPERM:
+ case -ENOKEY:
+ case -EKEYEXPIRED:
+ case -EKEYREJECTED:
+ case -EKEYREVOKED:
+ afs_get_writeback_key(wreq);
+ if (!wreq->netfs_priv)
+ stream->failed = true;
+ break;
+ }
}
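[Editor's note: the error classes that trigger key rotation above are the permission/keyring failures; anything else fails the write stream outright. An illustrative predicate equivalent to that switch (the helper name is hypothetical, not part of the patch):

#include <errno.h>
#include <stdbool.h>

/* True for the errors afs_issue_write_worker()/afs_retry_request() treat as
 * "try the next cached writeback key" rather than a hard failure. */
static bool afs_is_key_error(int error)
{
	switch (error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		return true;
	default:
		return false;
	}
}
]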
/*
diff --git a/fs/aio.c b/fs/aio.c
index 9cdaa2faa536..6ed5507cd330 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -122,7 +122,7 @@ struct kioctx {
unsigned long mmap_base;
unsigned long mmap_size;
- struct page **ring_pages;
+ struct folio **ring_folios;
long nr_pages;
struct rcu_work free_rwork; /* see free_ioctx() */
@@ -160,7 +160,7 @@ struct kioctx {
spinlock_t completion_lock;
} ____cacheline_aligned_in_smp;
- struct page *internal_pages[AIO_RING_PAGES];
+ struct folio *internal_folios[AIO_RING_PAGES];
struct file *aio_ring_file;
unsigned id;
@@ -334,19 +334,20 @@ static void aio_free_ring(struct kioctx *ctx)
put_aio_ring_file(ctx);
for (i = 0; i < ctx->nr_pages; i++) {
- struct page *page;
- pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
- page_count(ctx->ring_pages[i]));
- page = ctx->ring_pages[i];
- if (!page)
+ struct folio *folio = ctx->ring_folios[i];
+
+ if (!folio)
continue;
- ctx->ring_pages[i] = NULL;
- put_page(page);
+
+ pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
+ folio_ref_count(folio));
+ ctx->ring_folios[i] = NULL;
+ folio_put(folio);
}
- if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
- kfree(ctx->ring_pages);
- ctx->ring_pages = NULL;
+ if (ctx->ring_folios && ctx->ring_folios != ctx->internal_folios) {
+ kfree(ctx->ring_folios);
+ ctx->ring_folios = NULL;
}
}
@@ -441,7 +442,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
idx = src->index;
if (idx < (pgoff_t)ctx->nr_pages) {
/* Make sure the old folio hasn't already been changed */
- if (ctx->ring_pages[idx] != &src->page)
+ if (ctx->ring_folios[idx] != src)
rc = -EAGAIN;
} else
rc = -EINVAL;
@@ -465,8 +466,8 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
*/
spin_lock_irqsave(&ctx->completion_lock, flags);
folio_migrate_copy(dst, src);
- BUG_ON(ctx->ring_pages[idx] != &src->page);
- ctx->ring_pages[idx] = &dst->page;
+ BUG_ON(ctx->ring_folios[idx] != src);
+ ctx->ring_folios[idx] = dst;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
/* The old folio is no longer accessible. */
@@ -516,28 +517,30 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
/ sizeof(struct io_event);
- ctx->ring_pages = ctx->internal_pages;
+ ctx->ring_folios = ctx->internal_folios;
if (nr_pages > AIO_RING_PAGES) {
- ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
- GFP_KERNEL);
- if (!ctx->ring_pages) {
+ ctx->ring_folios = kcalloc(nr_pages, sizeof(struct folio *),
+ GFP_KERNEL);
+ if (!ctx->ring_folios) {
put_aio_ring_file(ctx);
return -ENOMEM;
}
}
for (i = 0; i < nr_pages; i++) {
- struct page *page;
- page = find_or_create_page(file->f_mapping,
- i, GFP_USER | __GFP_ZERO);
- if (!page)
+ struct folio *folio;
+
+ folio = __filemap_get_folio(file->f_mapping, i,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ GFP_USER | __GFP_ZERO);
+ if (IS_ERR(folio))
break;
- pr_debug("pid(%d) page[%d]->count=%d\n",
- current->pid, i, page_count(page));
- SetPageUptodate(page);
- unlock_page(page);
- ctx->ring_pages[i] = page;
+ pr_debug("pid(%d) [%d] folio->count=%d\n", current->pid, i,
+ folio_ref_count(folio));
+ folio_end_read(folio, true);
+
+ ctx->ring_folios[i] = folio;
}
ctx->nr_pages = i;
@@ -570,7 +573,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->user_id = ctx->mmap_base;
ctx->nr_events = nr_events; /* trusted copy */
- ring = page_address(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
ring->nr = nr_events; /* user copy */
ring->id = ~0U;
ring->head = ring->tail = 0;
@@ -578,7 +581,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ring->compat_features = AIO_RING_COMPAT_FEATURES;
ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
ring->header_length = sizeof(struct aio_ring);
- flush_dcache_page(ctx->ring_pages[0]);
+ flush_dcache_folio(ctx->ring_folios[0]);
return 0;
}
@@ -689,9 +692,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
/* While kioctx setup is in progress,
* we are protected from page migration
- * changes ring_pages by ->ring_lock.
+ * changes ring_folios by ->ring_lock.
*/
- ring = page_address(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
ring->id = ctx->id;
return 0;
}
@@ -1033,7 +1036,7 @@ static void user_refill_reqs_available(struct kioctx *ctx)
* against ctx->completed_events below will make sure we do the
* safe/right thing.
*/
- ring = page_address(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
head = ring->head;
refill_reqs_available(ctx, head, ctx->tail);
@@ -1145,12 +1148,12 @@ static void aio_complete(struct aio_kiocb *iocb)
if (++tail >= ctx->nr_events)
tail = 0;
- ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+ ev_page = folio_address(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
event = ev_page + pos % AIO_EVENTS_PER_PAGE;
*event = iocb->ki_res;
- flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+ flush_dcache_folio(ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE]);
pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
(void __user *)(unsigned long)iocb->ki_res.obj,
@@ -1163,10 +1166,10 @@ static void aio_complete(struct aio_kiocb *iocb)
ctx->tail = tail;
- ring = page_address(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
head = ring->head;
ring->tail = tail;
- flush_dcache_page(ctx->ring_pages[0]);
+ flush_dcache_folio(ctx->ring_folios[0]);
ctx->completed_events++;
if (ctx->completed_events > 1)
@@ -1202,8 +1205,8 @@ static void aio_complete(struct aio_kiocb *iocb)
spin_lock_irqsave(&ctx->wait.lock, flags);
list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
if (avail >= curr->min_nr) {
- list_del_init_careful(&curr->w.entry);
wake_up_process(curr->w.private);
+ list_del_init_careful(&curr->w.entry);
}
spin_unlock_irqrestore(&ctx->wait.lock, flags);
}
@@ -1238,8 +1241,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
sched_annotate_sleep();
mutex_lock(&ctx->ring_lock);
- /* Access to ->ring_pages here is protected by ctx->ring_lock. */
- ring = page_address(ctx->ring_pages[0]);
+ /* Access to ->ring_folios here is protected by ctx->ring_lock. */
+ ring = folio_address(ctx->ring_folios[0]);
head = ring->head;
tail = ring->tail;
@@ -1260,20 +1263,20 @@ static long aio_read_events_ring(struct kioctx *ctx,
while (ret < nr) {
long avail;
struct io_event *ev;
- struct page *page;
+ struct folio *folio;
avail = (head <= tail ? tail : ctx->nr_events) - head;
if (head == tail)
break;
pos = head + AIO_EVENTS_OFFSET;
- page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
+ folio = ctx->ring_folios[pos / AIO_EVENTS_PER_PAGE];
pos %= AIO_EVENTS_PER_PAGE;
avail = min(avail, nr - ret);
avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
- ev = page_address(page);
+ ev = folio_address(folio);
copy_ret = copy_to_user(event + ret, ev + pos,
sizeof(*ev) * avail);
@@ -1287,9 +1290,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
head %= ctx->nr_events;
}
- ring = page_address(ctx->ring_pages[0]);
+ ring = folio_address(ctx->ring_folios[0]);
ring->head = head;
- flush_dcache_page(ctx->ring_pages[0]);
+ flush_dcache_folio(ctx->ring_folios[0]);
pr_debug("%li h%u t%u\n", ret, head, tail);
out:
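[Editor's note: one behavioral detail in the page-to-folio conversion above: find_or_create_page() reported failure with NULL, whereas __filemap_get_folio() returns an ERR_PTR (e.g. -ENOMEM), so the failure test changes shape along with the types. Side by side, as fragments using the same FGP flags as the hunk:

/* Before: NULL on failure */
struct page *page = find_or_create_page(mapping, i, GFP_USER | __GFP_ZERO);
if (!page)
	break;

/* After: ERR_PTR on failure */
struct folio *folio = __filemap_get_folio(mapping, i,
					  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
					  GFP_USER | __GFP_ZERO);
if (IS_ERR(folio))
	break;
]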
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 0496cb5b6eab..42bd1cb7c9cd 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -149,6 +149,38 @@ struct file *anon_inode_getfile(const char *name,
EXPORT_SYMBOL_GPL(anon_inode_getfile);
/**
+ * anon_inode_getfile_fmode - creates a new file instance by hooking it up to an
+ * anonymous inode, and a dentry that describe the "class"
+ * of the file
+ *
+ * @name: [in] name of the "class" of the new file
+ * @fops: [in] file operations for the new file
+ * @priv: [in] private data for the new file (will be file's private_data)
+ * @flags: [in] flags
+ * @f_mode: [in] fmode
+ *
+ * Creates a new file by hooking it on a single inode. This is useful for files
+ * that do not need to have a full-fledged inode in order to operate correctly.
+ * All the files created with anon_inode_getfile() will share a single inode,
+ * hence saving memory and avoiding code duplication for the file/inode/dentry
+ * setup. Allows setting the fmode. Returns the newly created file* or an error
+ * pointer.
+ */
+struct file *anon_inode_getfile_fmode(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags, fmode_t f_mode)
+{
+ struct file *file;
+
+ file = __anon_inode_getfile(name, fops, priv, flags, NULL, false);
+ if (!IS_ERR(file))
+ file->f_mode |= f_mode;
+
+ return file;
+}
+EXPORT_SYMBOL_GPL(anon_inode_getfile_fmode);
+
+/**
* anon_inode_create_getfile - Like anon_inode_getfile(), but creates a new
* !S_PRIVATE anon inode rather than reuse the
* singleton anon inode and calls the
@@ -271,6 +303,7 @@ int anon_inode_create_getfd(const char *name, const struct file_operations *fops
return __anon_inode_getfd(name, fops, priv, flags, context_inode, true);
}
+
static int __init anon_inode_init(void)
{
anon_inode_mnt = kern_mount(&anon_inode_fs_type);
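[Editor's note: as a usage illustration of the new anon_inode_getfile_fmode() helper (hypothetical caller; struct mydev, mydev_fops and the "[mydev]" name are placeholders, not part of this patch):

/* Create an anon file whose f_mode gains FMODE_PREAD/FMODE_PWRITE beyond
 * what the open flags alone would set, then install it in an fd. */
static int mydev_create_fd(struct mydev *priv)
{
	struct file *file;
	int fd;

	file = anon_inode_getfile_fmode("[mydev]", &mydev_fops, priv,
					O_RDWR | O_CLOEXEC,
					FMODE_PREAD | FMODE_PWRITE);
	if (IS_ERR(file))
		return PTR_ERR(file);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		fput(file);
		return fd;
	}
	fd_install(fd, file);
	return fd;
}
]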
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index b02796c8a595..66ca0bbee639 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -17,6 +17,7 @@ bcachefs-y := \
btree_journal_iter.o \
btree_key_cache.o \
btree_locking.o \
+ btree_node_scan.o \
btree_trans_commit.o \
btree_update.o \
btree_update_interior.o \
@@ -37,6 +38,7 @@ bcachefs-y := \
error.o \
extents.o \
extent_update.o \
+ eytzinger.o \
fs.o \
fs-common.o \
fs-ioctl.o \
@@ -67,6 +69,7 @@ bcachefs-y := \
quota.o \
rebalance.o \
recovery.o \
+ recovery_passes.o \
reflink.o \
replicas.o \
sb-clean.o \
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 3640f417cce1..250d6c6d3a3a 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -281,37 +281,29 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter = { NULL };
- struct bkey_s_c_xattr xattr;
struct posix_acl *acl = NULL;
- struct bkey_s_c k;
- int ret;
retry:
bch2_trans_begin(trans);
- ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
- &hash, inode_inum(inode), &search, 0);
- if (ret) {
- if (!bch2_err_matches(ret, ENOENT))
- acl = ERR_PTR(ret);
- goto out;
- }
-
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret) {
- acl = ERR_PTR(ret);
- goto out;
- }
+ struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
+ &hash, inode_inum(inode), &search, 0);
+ int ret = bkey_err(k);
+ if (ret)
+ goto err;
- xattr = bkey_s_c_to_xattr(k);
+ struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
- le16_to_cpu(xattr.v->x_val_len));
+ le16_to_cpu(xattr.v->x_val_len));
+ ret = PTR_ERR_OR_ZERO(acl);
+err:
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ goto retry;
+
+ if (ret)
+ acl = !bch2_err_matches(ret, ENOENT) ? ERR_PTR(ret) : NULL;
- if (!IS_ERR(acl))
+ if (!IS_ERR_OR_NULL(acl))
set_cached_acl(&inode->v, type, acl);
-out:
- if (bch2_err_matches(PTR_ERR_OR_ZERO(acl), BCH_ERR_transaction_restart))
- goto retry;
bch2_trans_iter_exit(trans, &iter);
bch2_trans_put(trans);
@@ -368,7 +360,7 @@ retry:
ret = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
if (ret)
goto btree_err;
@@ -416,39 +408,30 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
struct btree_iter iter;
- struct bkey_s_c_xattr xattr;
- struct bkey_i_xattr *new;
struct posix_acl *acl = NULL;
- struct bkey_s_c k;
- int ret;
- ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
- &hash_info, inum, &search, BTREE_ITER_INTENT);
+ struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
+ &hash_info, inum, &search, BTREE_ITER_intent);
+ int ret = bkey_err(k);
if (ret)
return bch2_err_matches(ret, ENOENT) ? 0 : ret;
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
- xattr = bkey_s_c_to_xattr(k);
+ struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
ret = PTR_ERR_OR_ZERO(acl);
- if (IS_ERR_OR_NULL(acl))
+ if (ret)
goto err;
- ret = allocate_dropping_locks_errcode(trans,
- __posix_acl_chmod(&acl, _gfp, mode));
+ ret = allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode));
if (ret)
goto err;
- new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
- if (IS_ERR(new)) {
- ret = PTR_ERR(new);
+ struct bkey_i_xattr *new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
goto err;
- }
new->k.p = iter.pos;
ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
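The bch2_get_acl() rewrite above funnels every failure through one label and re-runs the whole lookup when the error matches BCH_ERR_transaction_restart. A schematic sketch of that retry-on-restart shape, with hypothetical stand-ins (do_lookup, ERR_RESTART) for the btree machinery:

/* Schematic sketch of the retry loop shape used above; the bcachefs
 * calls are replaced by hypothetical stand-ins. */
#include <stdio.h>

#define ERR_RESTART	(-1)	/* models BCH_ERR_transaction_restart */

static int attempts;

static int do_lookup(void)
{
	/* Pretend the first attempt races with another transaction: */
	return ++attempts == 1 ? ERR_RESTART : 0;
}

static int lookup_with_retry(void)
{
	int ret;
retry:
	/* bch2_trans_begin() would reset the transaction here */
	ret = do_lookup();
	if (ret == ERR_RESTART)
		goto retry;	/* a restart is not an error: rerun from the top */
	return ret;
}

int main(void)
{
	printf("ret=%d after %d attempts\n", lookup_with_retry(), attempts);
	return 0;
}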
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 893e38f9db80..346cd91f91f9 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -195,7 +195,7 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
}
int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
@@ -211,7 +211,7 @@ fsck_err:
}
int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_alloc_unpacked u;
@@ -225,7 +225,7 @@ fsck_err:
}
int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_alloc_unpacked u;
@@ -239,15 +239,15 @@ fsck_err:
}
int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags, struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
int ret = 0;
- bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
+ bkey_fsck_err_on(alloc_v4_u64s_noerror(a.v) > bkey_val_u64s(k.k), c, err,
alloc_v4_val_size_bad,
"bad val size (%u > %zu)",
- alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
+ alloc_v4_u64s_noerror(a.v), bkey_val_u64s(k.k));
bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
@@ -263,7 +263,7 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
case BCH_DATA_free:
case BCH_DATA_need_gc_gens:
case BCH_DATA_need_discard:
- bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
+ bkey_fsck_err_on(bch2_bucket_sectors_total(*a.v) || a.v->stripe,
c, err, alloc_key_empty_but_have_data,
"empty data type free but have data");
break;
@@ -330,27 +330,17 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
bch2_prt_data_type(out, a->data_type);
prt_newline(out);
- prt_printf(out, "journal_seq %llu", a->journal_seq);
- prt_newline(out);
- prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
- prt_newline(out);
- prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
- prt_newline(out);
- prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
- prt_newline(out);
- prt_printf(out, "cached_sectors %u", a->cached_sectors);
- prt_newline(out);
- prt_printf(out, "stripe %u", a->stripe);
- prt_newline(out);
- prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
- prt_newline(out);
- prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
- prt_newline(out);
- prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
- prt_newline(out);
- prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
- prt_newline(out);
- prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
+ prt_printf(out, "journal_seq %llu\n", a->journal_seq);
+ prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
+ prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
+ prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
+ prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
+ prt_printf(out, "stripe %u\n", a->stripe);
+ prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
+ prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]);
+ prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]);
+ prt_printf(out, "fragmentation %llu\n", a->fragmentation_lru);
+ prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
printbuf_indent_sub(out, 2);
}
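The prt_printf()/prt_newline() pairs collapse into single calls with embedded newlines, which presumes the printbuf layer itself interprets '\n' and reapplies the current indent — a printbuf behavior this series appears to rely on. A hypothetical userspace sketch of such an indent-aware writer (not the printbuf implementation):

/* Sketch: every '\n' re-emits the current indent, which is what lets
 * callers fold prt_newline() into the format string. Hypothetical. */
#include <stdio.h>

static int indent = 2;

static void prt_str_indented(const char *s)
{
	for (; *s; s++) {
		putchar(*s);
		if (*s == '\n')
			printf("%*s", indent, "");
	}
}

int main(void)
{
	prt_str_indented("journal_seq 42\nneed_discard 0\n");
	return 0;
}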
@@ -439,22 +429,18 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct b
}
struct bkey_i_alloc_v4 *
-bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
- struct bpos pos)
+bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos pos)
{
- struct bkey_s_c k;
- struct bkey_i_alloc_v4 *a;
- int ret;
-
- k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
- BTREE_ITER_WITH_UPDATES|
- BTREE_ITER_CACHED|
- BTREE_ITER_INTENT);
- ret = bkey_err(k);
+ struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
+ BTREE_ITER_with_updates|
+ BTREE_ITER_cached|
+ BTREE_ITER_intent);
+ int ret = bkey_err(k);
if (unlikely(ret))
return ERR_PTR(ret);
- a = bch2_alloc_to_v4_mut_inlined(trans, k);
+ struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
ret = PTR_ERR_OR_ZERO(a);
if (unlikely(ret))
goto err;
@@ -464,6 +450,20 @@ err:
return ERR_PTR(ret);
}
+__flatten
+struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos)
+{
+ struct btree_iter iter;
+ struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
+ int ret = PTR_ERR_OR_ZERO(a);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+ bch2_trans_iter_exit(trans, &iter);
+ return unlikely(ret) ? ERR_PTR(ret) : a;
+}
+
static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;
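bch2_trans_start_alloc_update() is now a wrapper that performs the lookup, queues the update, and releases the iterator internally, so callers no longer carry a struct btree_iter (see invalidate_one_bucket() further down). A schematic sketch of that wrapper-owns-the-iterator split, with hypothetical types:

/* Schematic sketch: a convenience wrapper owning the iterator lifetime,
 * mirroring bch2_trans_start_alloc_update() wrapping the _noupdate
 * variant. All names hypothetical. */
#include <stdio.h>
#include <stdlib.h>

struct iter { int live; };
struct val  { int v; };

static struct val *lookup_noupdate(struct iter *it)
{
	it->live = 1;		/* caller must release the iterator */
	return calloc(1, sizeof(struct val));
}

static void iter_exit(struct iter *it)
{
	it->live = 0;
}

static struct val *lookup_and_queue_update(void)
{
	struct iter it;
	struct val *v = lookup_noupdate(&it);

	/* ... queue v as a pending update here ... */
	iter_exit(&it);		/* wrapper releases the iterator itself */
	return v;
}

int main(void)
{
	free(lookup_and_queue_update());
	printf("caller never touched an iterator\n");
	return 0;
}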
@@ -487,7 +487,7 @@ static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
}
int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
int ret = 0;
@@ -520,7 +520,7 @@ int bch2_bucket_gens_init(struct bch_fs *c)
int ret;
ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_PREFETCH, k, ({
+ BTREE_ITER_prefetch, k, ({
/*
* Not a fsck error because this is checked/repaired by
* bch2_check_alloc_key() which runs later:
@@ -567,29 +567,31 @@ iter_err:
int bch2_alloc_read(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
+ struct bch_dev *ca = NULL;
int ret;
down_read(&c->gc_lock);
if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
- BTREE_ITER_PREFETCH, k, ({
+ BTREE_ITER_prefetch, k, ({
u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
if (k.k->type != KEY_TYPE_bucket_gens)
continue;
- const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
-
+ ca = bch2_dev_iterate(c, ca, k.k->p.inode);
/*
* Not a fsck error because this is checked/repaired by
* bch2_check_alloc_key() which runs later:
*/
- if (!bch2_dev_exists2(c, k.k->p.inode))
+ if (!ca) {
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
continue;
+ }
- struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
for (u64 b = max_t(u64, ca->mi.first_bucket, start);
b < min_t(u64, ca->mi.nbuckets, end);
@@ -599,15 +601,16 @@ int bch2_alloc_read(struct bch_fs *c)
}));
} else {
ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_PREFETCH, k, ({
+ BTREE_ITER_prefetch, k, ({
+ ca = bch2_dev_iterate(c, ca, k.k->p.inode);
/*
* Not a fsck error because this is checked/repaired by
* bch2_check_alloc_key() which runs later:
*/
- if (!bch2_dev_bucket_exists(c, k.k->p))
+ if (!ca) {
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
continue;
-
- struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+ }
struct bch_alloc_v4 a;
*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
@@ -615,6 +618,7 @@ int bch2_alloc_read(struct bch_fs *c)
}));
}
+ bch2_dev_put(ca);
bch2_trans_put(trans);
up_read(&c->gc_lock);
@@ -625,12 +629,12 @@ int bch2_alloc_read(struct bch_fs *c)
/* Free space/discard btree: */
static int bch2_bucket_do_index(struct btree_trans *trans,
+ struct bch_dev *ca,
struct bkey_s_c alloc_k,
const struct bch_alloc_v4 *a,
bool set)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
struct btree_iter iter;
struct bkey_s_c old;
struct bkey_i *k;
@@ -667,7 +671,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
old = bch2_bkey_get_iter(trans, &iter, btree,
bkey_start_pos(&k->k),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
ret = bkey_err(old);
if (ret)
return ret;
@@ -711,8 +715,8 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
return ret;
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
- BTREE_ITER_INTENT|
- BTREE_ITER_WITH_UPDATES);
+ BTREE_ITER_intent|
+ BTREE_ITER_with_updates);
ret = bkey_err(k);
if (ret)
return ret;
@@ -734,26 +738,24 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
int bch2_trigger_alloc(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
int ret = 0;
- if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
- "alloc key for invalid device or bucket"))
+ struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
+ if (!ca)
return -EIO;
- struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
-
struct bch_alloc_v4 old_a_convert;
const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ if (flags & BTREE_TRIGGER_transactional) {
struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
- new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
+ alloc_data_type_set(new_a, new_a->data_type);
- if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
+ if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
@@ -770,10 +772,10 @@ int bch2_trigger_alloc(struct btree_trans *trans,
if (old_a->data_type != new_a->data_type ||
(new_a->data_type == BCH_DATA_free &&
alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
- ret = bch2_bucket_do_index(trans, old, old_a, false) ?:
- bch2_bucket_do_index(trans, new.s_c, new_a, true);
+ ret = bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
+ bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
if (ret)
- return ret;
+ goto err;
}
if (new_a->data_type == BCH_DATA_cached &&
@@ -787,24 +789,23 @@ int bch2_trigger_alloc(struct btree_trans *trans,
bucket_to_u64(new.k->p),
old_lru, new_lru);
if (ret)
- return ret;
+ goto err;
}
- new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
- bch_dev_bkey_exists(c, new.k->p.inode));
+ new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a, ca);
if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
ret = bch2_lru_change(trans,
BCH_LRU_FRAGMENTATION_START,
bucket_to_u64(new.k->p),
old_a->fragmentation_lru, new_a->fragmentation_lru);
if (ret)
- return ret;
+ goto err;
}
if (old_a->gen != new_a->gen) {
ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
if (ret)
- return ret;
+ goto err;
}
/*
@@ -812,21 +813,21 @@ int bch2_trigger_alloc(struct btree_trans *trans,
* not:
*/
- if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
+ if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
old_a->cached_sectors) {
ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
-((s64) old_a->cached_sectors));
if (ret)
- return ret;
+ goto err;
}
}
- if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
+ if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
u64 journal_seq = trans->journal_res.seq;
u64 bucket_journal_seq = new_a->journal_seq;
- if ((flags & BTREE_TRIGGER_INSERT) &&
+ if ((flags & BTREE_TRIGGER_insert) &&
data_type_is_empty(old_a->data_type) !=
data_type_is_empty(new_a->data_type) &&
new.k->type == KEY_TYPE_alloc_v4) {
@@ -854,7 +855,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
if (ret) {
bch2_fs_fatal_error(c,
"setting bucket_needs_journal_commit: %s", bch2_err_str(ret));
- return ret;
+ goto err;
}
}
@@ -884,11 +885,11 @@ int bch2_trigger_alloc(struct btree_trans *trans,
bch2_do_invalidates(c);
if (statechange(a->data_type == BCH_DATA_need_gc_gens))
- bch2_do_gc_gens(c);
+ bch2_gc_gens_async(c);
}
- if ((flags & BTREE_TRIGGER_GC) &&
- (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
+ if ((flags & BTREE_TRIGGER_gc) &&
+ (flags & BTREE_TRIGGER_bucket_invalidate)) {
struct bch_alloc_v4 new_a_convert;
const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
@@ -908,12 +909,13 @@ int bch2_trigger_alloc(struct btree_trans *trans,
bucket_unlock(g);
percpu_up_read(&c->mark_lock);
}
-
- return 0;
+err:
+ bch2_dev_put(ca);
+ return ret;
}
/*
- * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
+ * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
* extents style btrees, but works on non-extents btrees:
*/
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
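bch2_trigger_alloc() above switches from bch_dev_bkey_exists(), which assumed the device was present, to bch2_dev_bucket_tryget(), and every early return becomes goto err so the matching bch2_dev_put() always runs. A compact sketch of that single-exit refcount discipline (get/put are stand-ins):

/* Sketch of the single-exit refcount discipline adopted above: every
 * failure path funnels through err so the put always runs. */
#include <stdio.h>

static int refs;
static int get(void) { refs++; return 1; }
static void put(void) { refs--; }

static int trigger(int fail_early)
{
	int ret = 0;

	if (!get())
		return -5;	/* -EIO: nothing acquired yet, plain return is fine */

	if (fail_early) {
		ret = -22;	/* -EINVAL */
		goto err;	/* not "return": the ref must be dropped */
	}
	/* ... main work ... */
err:
	put();
	return ret;
}

int main(void)
{
	trigger(1);
	printf("leaked refs: %d\n", refs);	/* prints 0 */
	return 0;
}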
@@ -958,35 +960,34 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
}
}
-static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
+static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
{
- struct bch_dev *ca;
-
- if (bch2_dev_bucket_exists(c, *bucket))
- return true;
-
- if (bch2_dev_exists2(c, bucket->inode)) {
- ca = bch_dev_bkey_exists(c, bucket->inode);
+ if (*ca) {
+ if (bucket->offset < (*ca)->mi.first_bucket)
+ bucket->offset = (*ca)->mi.first_bucket;
- if (bucket->offset < ca->mi.first_bucket) {
- bucket->offset = ca->mi.first_bucket;
+ if (bucket->offset < (*ca)->mi.nbuckets)
return true;
- }
+ bch2_dev_put(*ca);
+ *ca = NULL;
bucket->inode++;
bucket->offset = 0;
}
rcu_read_lock();
- ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
- if (ca)
- *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
+ *ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
+ if (*ca) {
+ *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
+ bch2_dev_get(*ca);
+ }
rcu_read_unlock();
- return ca != NULL;
+ return *ca != NULL;
}
-static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
+ struct bch_dev **ca, struct bkey *hole)
{
struct bch_fs *c = iter->trans->c;
struct bkey_s_c k;
@@ -995,22 +996,21 @@ again:
if (bkey_err(k))
return k;
+ *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);
+
if (!k.k->type) {
- struct bpos bucket = bkey_start_pos(k.k);
+ struct bpos hole_start = bkey_start_pos(k.k);
- if (!bch2_dev_bucket_exists(c, bucket)) {
- if (!next_bucket(c, &bucket))
+ if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
+ if (!next_bucket(c, ca, &hole_start))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, bucket);
+ bch2_btree_iter_set_pos(iter, hole_start);
goto again;
}
- if (!bch2_dev_bucket_exists(c, k.k->p)) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
-
- bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
- }
+ if (k.k->p.offset > (*ca)->mi.nbuckets)
+ bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
}
return k;
@@ -1025,24 +1025,25 @@ int bch2_check_alloc_key(struct btree_trans *trans,
struct btree_iter *bucket_gens_iter)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca;
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a;
unsigned discard_key_type, freespace_key_type;
unsigned gens_offset;
struct bkey_s_c k;
struct printbuf buf = PRINTBUF;
- int ret;
+ int ret = 0;
- if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
- alloc_key_to_missing_dev_bucket,
+ struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
+ if (fsck_err_on(!ca,
+ c, alloc_key_to_missing_dev_bucket,
"alloc key for invalid device:bucket %llu:%llu",
alloc_k.k->p.inode, alloc_k.k->p.offset))
- return bch2_btree_delete_at(trans, alloc_iter, 0);
+ ret = bch2_btree_delete_at(trans, alloc_iter, 0);
+ if (!ca)
+ return ret;
- ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
if (!ca->mi.freespace_initialized)
- return 0;
+ goto out;
a = bch2_alloc_to_v4(alloc_k, &a_convert);
@@ -1141,25 +1142,26 @@ int bch2_check_alloc_key(struct btree_trans *trans,
if (ret)
goto err;
}
+out:
err:
fsck_err:
+ bch2_dev_put(ca);
printbuf_exit(&buf);
return ret;
}
static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
+ struct bch_dev *ca,
struct bpos start,
struct bpos *end,
struct btree_iter *freespace_iter)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca;
struct bkey_s_c k;
struct printbuf buf = PRINTBUF;
int ret;
- ca = bch_dev_bkey_exists(c, start.inode);
if (!ca->mi.freespace_initialized)
return 0;
@@ -1313,7 +1315,7 @@ static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_tran
goto delete;
out:
fsck_err:
- set_btree_iter_dontneed(&alloc_iter);
+ bch2_set_btree_iter_dontneed(&alloc_iter);
bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
@@ -1337,30 +1339,25 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct bkey_i_bucket_gens g;
- struct bch_dev *ca;
u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
u64 b;
- bool need_update = false, dev_exists;
+ bool need_update = false;
struct printbuf buf = PRINTBUF;
int ret = 0;
BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
bkey_reassemble(&g.k_i, k);
- /* if no bch_dev, skip out whether we repair or not */
- dev_exists = bch2_dev_exists2(c, k.k->p.inode);
- if (!dev_exists) {
- if (fsck_err_on(!dev_exists, c,
- bucket_gens_to_invalid_dev,
- "bucket_gens key for invalid device:\n %s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
+ if (!ca) {
+ if (fsck_err(c, bucket_gens_to_invalid_dev,
+ "bucket_gens key for invalid device:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = bch2_btree_delete_at(trans, iter, 0);
- }
goto out;
}
- ca = bch_dev_bkey_exists(c, k.k->p.inode);
if (fsck_err_on(end <= ca->mi.first_bucket ||
start >= ca->mi.nbuckets, c,
bucket_gens_to_invalid_buckets,
@@ -1398,6 +1395,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
}
out:
fsck_err:
+ bch2_dev_put(ca);
printbuf_exit(&buf);
return ret;
}
@@ -1406,25 +1404,26 @@ int bch2_check_alloc_info(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
+ struct bch_dev *ca = NULL;
struct bkey hole;
struct bkey_s_c k;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_PREFETCH);
+ BTREE_ITER_prefetch);
bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
- BTREE_ITER_PREFETCH);
+ BTREE_ITER_prefetch);
bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
- BTREE_ITER_PREFETCH);
+ BTREE_ITER_prefetch);
bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
- BTREE_ITER_PREFETCH);
+ BTREE_ITER_prefetch);
while (1) {
struct bpos next;
bch2_trans_begin(trans);
- k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
+ k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -1445,7 +1444,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
} else {
next = k.k->p;
- ret = bch2_check_alloc_hole_freespace(trans,
+ ret = bch2_check_alloc_hole_freespace(trans, ca,
bkey_start_pos(k.k),
&next,
&freespace_iter) ?:
@@ -1473,19 +1472,21 @@ bkey_err:
bch2_trans_iter_exit(trans, &freespace_iter);
bch2_trans_iter_exit(trans, &discard_iter);
bch2_trans_iter_exit(trans, &iter);
+ bch2_dev_put(ca);
+ ca = NULL;
if (ret < 0)
goto err;
ret = for_each_btree_key(trans, iter,
BTREE_ID_need_discard, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
bch2_check_discard_freespace_key(trans, &iter));
if (ret)
goto err;
bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
- BTREE_ITER_PREFETCH);
+ BTREE_ITER_prefetch);
while (1) {
bch2_trans_begin(trans);
k = bch2_btree_iter_peek(&iter);
@@ -1515,7 +1516,7 @@ bkey_err:
ret = for_each_btree_key_commit(trans, iter,
BTREE_ID_bucket_gens, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_check_bucket_gens_key(trans, &iter, k));
err:
@@ -1562,7 +1563,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
ret = bch2_trans_update(trans, alloc_iter,
- &a_mut->k_i, BTREE_TRIGGER_NORUN);
+ &a_mut->k_i, BTREE_TRIGGER_norun);
if (ret)
goto err;
@@ -1601,7 +1602,7 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
- POS_MIN, BTREE_ITER_PREFETCH, k,
+ POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_check_alloc_to_lru_ref(trans, &iter)));
bch_err_fn(c, ret);
@@ -1657,9 +1658,7 @@ static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_st
bch2_journal_flush_async(&c->journal, NULL);
if (s->ca)
- percpu_ref_put(&s->ca->ref);
- if (ca)
- percpu_ref_get(&ca->ref);
+ percpu_ref_put(&s->ca->io_ref);
s->ca = ca;
s->need_journal_commit_this_dev = 0;
}
@@ -1673,15 +1672,15 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
struct bpos pos = need_discard_iter->pos;
struct btree_iter iter = { NULL };
struct bkey_s_c k;
- struct bch_dev *ca;
struct bkey_i_alloc_v4 *a;
struct printbuf buf = PRINTBUF;
bool discard_locked = false;
int ret = 0;
- ca = bch_dev_bkey_exists(c, pos.inode);
-
- if (!percpu_ref_tryget(&ca->io_ref)) {
+ struct bch_dev *ca = s->ca && s->ca->dev_idx == pos.inode
+ ? s->ca
+ : bch2_dev_get_ioref(c, pos.inode, WRITE);
+ if (!ca) {
bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
return 0;
}
@@ -1703,7 +1702,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
need_discard_iter->pos,
- BTREE_ITER_CACHED);
+ BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
goto out;
@@ -1713,34 +1712,37 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
if (ret)
goto out;
- if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
- a->v.gen++;
- SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
- goto write;
- }
-
- if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
- if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
- bch2_trans_inconsistent(trans,
- "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
- "%s",
- a->v.journal_seq,
- c->journal.flushed_seq_ondisk,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+ if (bch2_bucket_sectors_total(a->v)) {
+ if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
+ trans, "attempting to discard bucket with dirty data\n%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = -EIO;
- }
goto out;
}
if (a->v.data_type != BCH_DATA_need_discard) {
- if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
- bch2_trans_inconsistent(trans,
- "bucket incorrectly set in need_discard btree\n"
- "%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- ret = -EIO;
+ if (data_type_is_empty(a->v.data_type) &&
+ BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
+ a->v.gen++;
+ SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+ goto write;
}
+ if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
+ trans, "bucket incorrectly set in need_discard btree\n"
+ "%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ ret = -EIO;
+ goto out;
+ }
+
+ if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
+ if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
+ trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s",
+ a->v.journal_seq,
+ c->journal.flushed_seq_ondisk,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ ret = -EIO;
goto out;
}
@@ -1768,7 +1770,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
}
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
- a->v.data_type = alloc_data_type(a->v, a->v.data_type);
+ alloc_data_type_set(&a->v, a->v.data_type);
write:
ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
bch2_trans_commit(trans, NULL, NULL,
@@ -1784,7 +1786,6 @@ out:
discard_in_flight_remove(c, iter.pos);
s->seen++;
bch2_trans_iter_exit(trans, &iter);
- percpu_ref_put(&ca->io_ref);
printbuf_exit(&buf);
return ret;
}
@@ -1824,7 +1825,7 @@ void bch2_do_discards(struct bch_fs *c)
static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
{
struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_INTENT);
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
int ret = bkey_err(k);
if (ret)
@@ -1835,8 +1836,9 @@ static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpo
if (ret)
goto err;
+ BUG_ON(a->v.dirty_sectors);
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
- a->v.data_type = alloc_data_type(a->v, a->v.data_type);
+ alloc_data_type_set(&a->v, a->v.data_type);
ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
err:
@@ -1858,9 +1860,8 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
if (i->snapshot)
continue;
- ca = bch_dev_bkey_exists(c, i->inode);
-
- if (!percpu_ref_tryget(&ca->io_ref)) {
+ ca = bch2_dev_get_ioref(c, i->inode, WRITE);
+ if (!ca) {
darray_remove_item(&c->discard_buckets_in_flight, i);
continue;
}
@@ -1899,9 +1900,12 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, bucket.inode);
+ bool dead = !ca || percpu_ref_is_dying(&ca->io_ref);
+ rcu_read_unlock();
- if (!percpu_ref_is_dying(&ca->io_ref) &&
+ if (!dead &&
!discard_in_flight_add(c, bucket) &&
bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast) &&
!queue_work(c->write_ref_wq, &c->discard_fast_work))
@@ -1914,7 +1918,6 @@ static int invalidate_one_bucket(struct btree_trans *trans,
s64 *nr_to_invalidate)
{
struct bch_fs *c = trans->c;
- struct btree_iter alloc_iter = { NULL };
struct bkey_i_alloc_v4 *a = NULL;
struct printbuf buf = PRINTBUF;
struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
@@ -1932,7 +1935,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
return 0;
- a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
+ a = bch2_trans_start_alloc_update(trans, bucket);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
@@ -1942,6 +1945,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
goto out;
BUG_ON(a->v.data_type != BCH_DATA_cached);
+ BUG_ON(a->v.dirty_sectors);
if (!a->v.cached_sectors)
bch_err(c, "invalidating empty bucket, confused");
@@ -1956,18 +1960,15 @@ static int invalidate_one_bucket(struct btree_trans *trans,
a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
- ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
- BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_WATERMARK_btree|
- BCH_TRANS_COMMIT_no_enospc);
+ ret = bch2_trans_commit(trans, NULL, NULL,
+ BCH_WATERMARK_btree|
+ BCH_TRANS_COMMIT_no_enospc);
if (ret)
goto out;
trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
--*nr_to_invalidate;
out:
- bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
err:
@@ -2009,11 +2010,11 @@ static void bch2_do_invalidates_work(struct work_struct *work)
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
lru_pos(ca->dev_idx, 0, 0),
lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
- BTREE_ITER_INTENT, k,
+ BTREE_ITER_intent, k,
invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
if (ret < 0) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
break;
}
}
@@ -2046,7 +2047,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
- BTREE_ITER_PREFETCH);
+ BTREE_ITER_prefetch);
/*
* Scan the alloc btree for every bucket on @ca, and add buckets to the
* freespace/need_discard/need_gc_gens btrees as needed:
@@ -2078,7 +2079,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
- ret = bch2_bucket_do_index(trans, k, a, true) ?:
+ ret = bch2_bucket_do_index(trans, ca, k, a, true) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc);
if (ret)
@@ -2150,7 +2151,7 @@ int bch2_fs_freespace_init(struct bch_fs *c)
ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
if (ret) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
bch_err_fn(c, ret);
return ret;
}
@@ -2177,7 +2178,10 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
u64 now;
int ret = 0;
- a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
+ if (bch2_trans_relock(trans))
+ bch2_trans_begin(trans);
+
+ a = bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
ret = PTR_ERR_OR_ZERO(a);
if (ret)
return ret;
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index 052b2fac25d6..ae31a94be6f9 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -8,21 +8,18 @@
#include "debug.h"
#include "super.h"
-enum bkey_invalid_flags;
+enum bch_validate_flags;
/* How out of date a pointer gen is allowed to be: */
#define BUCKET_GC_GEN_MAX 96U
static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
{
- struct bch_dev *ca;
-
- if (!bch2_dev_exists2(c, pos.inode))
- return false;
-
- ca = bch_dev_bkey_exists(c, pos.inode);
- return pos.offset >= ca->mi.first_bucket &&
- pos.offset < ca->mi.nbuckets;
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, pos.inode);
+ bool ret = ca && bucket_valid(ca, pos.offset);
+ rcu_read_unlock();
+ return ret;
}
static inline u64 bucket_to_u64(struct bpos bucket)
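The new bch2_dev_bucket_exists() resolves the device under RCU and finishes every dereference of ca before rcu_read_unlock(). A userspace sketch of the same shape, with a rwlock standing in for RCU and an array slot for c->devs — illustrative only:

/* Sketch of the lookup-under-read-lock shape used above. Compile with
 * -lpthread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

struct dev { unsigned first_bucket, nbuckets; };
static struct dev *devs[4];	/* slots may be NULL: device may be gone */

static bool dev_bucket_exists(unsigned dev, unsigned bucket)
{
	bool ret;

	pthread_rwlock_rdlock(&lock);		/* rcu_read_lock() */
	struct dev *ca = dev < 4 ? devs[dev] : NULL;
	/* evaluate everything touching ca before dropping the lock: */
	ret = ca && bucket >= ca->first_bucket && bucket < ca->nbuckets;
	pthread_rwlock_unlock(&lock);		/* rcu_read_unlock() */
	return ret;
}

int main(void)
{
	static struct dev d = { .first_bucket = 16, .nbuckets = 1024 };

	devs[0] = &d;
	printf("%d %d\n", dev_bucket_exists(0, 20), dev_bucket_exists(1, 20));
	return 0;
}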
@@ -40,38 +37,50 @@ static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
return a.gen - a.oldest_gen;
}
-static inline enum bch_data_type __alloc_data_type(u32 dirty_sectors,
- u32 cached_sectors,
- u32 stripe,
- struct bch_alloc_v4 a,
- enum bch_data_type data_type)
+static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src)
{
- if (stripe)
- return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
- if (dirty_sectors)
- return data_type;
- if (cached_sectors)
- return BCH_DATA_cached;
- if (BCH_ALLOC_V4_NEED_DISCARD(&a))
- return BCH_DATA_need_discard;
- if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
- return BCH_DATA_need_gc_gens;
- return BCH_DATA_free;
+ dst->gen = src.gen;
+ dst->data_type = src.data_type;
+ dst->dirty_sectors = src.dirty_sectors;
+ dst->cached_sectors = src.cached_sectors;
+ dst->stripe = src.stripe;
}
-static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
- enum bch_data_type data_type)
+static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src)
+{
+ dst->gen = src.gen;
+ dst->data_type = src.data_type;
+ dst->dirty_sectors = src.dirty_sectors;
+ dst->cached_sectors = src.cached_sectors;
+ dst->stripe = src.stripe;
+}
+
+static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
{
- return __alloc_data_type(a.dirty_sectors, a.cached_sectors,
- a.stripe, a, data_type);
+ struct bch_alloc_v4 ret = {};
+ __bucket_m_to_alloc(&ret, b);
+ return ret;
}
static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type)
{
- return data_type == BCH_DATA_stripe ? BCH_DATA_user : data_type;
+ switch (data_type) {
+ case BCH_DATA_cached:
+ case BCH_DATA_stripe:
+ return BCH_DATA_user;
+ default:
+ return data_type;
+ }
+}
+
+static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,
+ enum bch_data_type ptr)
+{
+ return !data_type_is_empty(bucket) &&
+ bucket_data_type(bucket) != bucket_data_type(ptr);
}
-static inline unsigned bch2_bucket_sectors(struct bch_alloc_v4 a)
+static inline unsigned bch2_bucket_sectors_total(struct bch_alloc_v4 a)
{
return a.dirty_sectors + a.cached_sectors;
}
@@ -89,6 +98,27 @@ static inline unsigned bch2_bucket_sectors_fragmented(struct bch_dev *ca,
return d ? max(0, ca->mi.bucket_size - d) : 0;
}
+static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a,
+ enum bch_data_type data_type)
+{
+ if (a.stripe)
+ return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe;
+ if (a.dirty_sectors)
+ return data_type;
+ if (a.cached_sectors)
+ return BCH_DATA_cached;
+ if (BCH_ALLOC_V4_NEED_DISCARD(&a))
+ return BCH_DATA_need_discard;
+ if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
+ return BCH_DATA_need_gc_gens;
+ return BCH_DATA_free;
+}
+
+static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_type data_type)
+{
+ a->data_type = alloc_data_type(*a, data_type);
+}
+
static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
{
return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
@@ -126,13 +156,17 @@ static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_
return pos;
}
-static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
+static inline unsigned alloc_v4_u64s_noerror(const struct bch_alloc_v4 *a)
{
- unsigned ret = (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
+ return (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?:
BCH_ALLOC_V4_U64s_V0) +
BCH_ALLOC_V4_NR_BACKPOINTERS(a) *
(sizeof(struct bch_backpointer) / sizeof(u64));
+}
+static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a)
+{
+ unsigned ret = alloc_v4_u64s_noerror(a);
BUG_ON(ret > U8_MAX - BKEY_U64s);
return ret;
}
@@ -143,7 +177,9 @@ static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
}
struct bkey_i_alloc_v4 *
-bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);
+bch2_trans_start_alloc_update_noupdate(struct btree_trans *, struct btree_iter *, struct bpos);
+struct bkey_i_alloc_v4 *
+bch2_trans_start_alloc_update(struct btree_trans *, struct bpos);
void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
@@ -169,13 +205,13 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s
int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
int bch2_alloc_v1_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int bch2_alloc_v2_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int bch2_alloc_v3_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int bch2_alloc_v4_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
@@ -209,7 +245,7 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
})
int bch2_bucket_gens_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_bucket_gens ((struct bkey_ops) { \
@@ -229,7 +265,8 @@ static inline bool bkey_is_alloc(const struct bkey *k)
int bch2_alloc_read(struct bch_fs *);
int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
int bch2_check_alloc_info(struct bch_fs *);
int bch2_check_alloc_to_lru_refs(struct bch_fs *);
void bch2_do_discards(struct bch_fs *);
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 214b15c84d1f..927a5f300b30 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -71,7 +71,7 @@ void bch2_reset_alloc_cursors(struct bch_fs *c)
{
rcu_read_lock();
for_each_member_device_rcu(c, ca, NULL)
- ca->alloc_cursor = 0;
+ memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
rcu_read_unlock();
}
@@ -100,7 +100,7 @@ static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *o
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_dev *ca = ob_dev(c, ob);
if (ob->ec) {
ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
@@ -188,8 +188,10 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{
switch (watermark) {
- case BCH_WATERMARK_reclaim:
+ case BCH_WATERMARK_interior_updates:
return 0;
+ case BCH_WATERMARK_reclaim:
+ return OPEN_BUCKETS_COUNT / 6;
case BCH_WATERMARK_btree:
case BCH_WATERMARK_btree_copygc:
return OPEN_BUCKETS_COUNT / 4;
@@ -298,7 +300,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
k = bch2_bkey_get_iter(trans, &iter,
BTREE_ID_alloc, POS(ca->dev_idx, b),
- BTREE_ITER_CACHED);
+ BTREE_ITER_cached);
ret = bkey_err(k);
if (ret) {
ob = ERR_PTR(ret);
@@ -340,9 +342,9 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
struct bch_backpointer bp;
struct bpos bp_pos = POS_MIN;
- ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
+ ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1,
&bp_pos, &bp,
- BTREE_ITER_NOPRESERVE);
+ BTREE_ITER_nopreserve);
if (ret) {
ob = ERR_PTR(ret);
goto err;
@@ -361,10 +363,10 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
if (!ob)
- set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(&iter);
err:
if (iter.path)
- set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(&iter);
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
return ob;
@@ -387,7 +389,8 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
struct bkey_s_c k, ck;
struct open_bucket *ob = NULL;
u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
- u64 alloc_start = max(first_bucket, READ_ONCE(ca->alloc_cursor));
+ u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
+ u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
u64 alloc_cursor = alloc_start;
int ret;
@@ -402,9 +405,8 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
*/
again:
for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
- BTREE_ITER_SLOTS, k, ret) {
- struct bch_alloc_v4 a_convert;
- const struct bch_alloc_v4 *a;
+ BTREE_ITER_slots, k, ret) {
+ u64 bucket = k.k->p.offset;
if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
break;
@@ -413,12 +415,29 @@ again:
is_superblock_bucket(ca, k.k->p.offset))
continue;
- a = bch2_alloc_to_v4(k, &a_convert);
+ if (s->btree_bitmap != BTREE_BITMAP_ANY &&
+ s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
+ bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
+ if (s->btree_bitmap == BTREE_BITMAP_YES &&
+ bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
+ break;
+
+ bucket = sector_to_bucket(ca,
+ round_up(bucket_to_sector(ca, bucket) + 1,
+ 1ULL << ca->mi.btree_bitmap_shift));
+ bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
+ s->buckets_seen++;
+ s->skipped_mi_btree_bitmap++;
+ continue;
+ }
+
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
if (a->data_type != BCH_DATA_free)
continue;
/* now check the cached key to serialize concurrent allocs of the bucket */
- ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED);
+ ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
ret = bkey_err(ck);
if (ret)
break;
@@ -431,7 +450,7 @@ again:
ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
next:
- set_btree_iter_dontneed(&citer);
+ bch2_set_btree_iter_dontneed(&citer);
bch2_trans_iter_exit(trans, &citer);
if (ob)
break;
@@ -439,7 +458,6 @@ next:
bch2_trans_iter_exit(trans, &iter);
alloc_cursor = iter.pos.offset;
- ca->alloc_cursor = alloc_cursor;
if (!ob && ret)
ob = ERR_PTR(ret);
@@ -449,6 +467,8 @@ next:
goto again;
}
+ *dev_alloc_cursor = alloc_cursor;
+
return ob;
}
@@ -461,7 +481,8 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
struct btree_iter iter;
struct bkey_s_c k;
struct open_bucket *ob = NULL;
- u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
+ u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
+ u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
u64 alloc_cursor = alloc_start;
int ret;
@@ -483,10 +504,30 @@ again:
s->buckets_seen++;
+ u64 bucket = alloc_cursor & ~(~0ULL << 56);
+ if (s->btree_bitmap != BTREE_BITMAP_ANY &&
+ s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
+ bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
+ if (s->btree_bitmap == BTREE_BITMAP_YES &&
+ bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
+ goto fail;
+
+ bucket = sector_to_bucket(ca,
+ round_up(bucket_to_sector(ca, bucket) + 1,
+ 1ULL << ca->mi.btree_bitmap_shift));
+ u64 genbits = alloc_cursor >> 56;
+ alloc_cursor = bucket | (genbits << 56);
+
+ if (alloc_cursor > k.k->p.offset)
+ bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
+ s->skipped_mi_btree_bitmap++;
+ continue;
+ }
+
ob = try_alloc_bucket(trans, ca, watermark,
alloc_cursor, s, k, cl);
if (ob) {
- set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(&iter);
break;
}
}
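When a bucket's sectors don't match the wanted btree-bitmap state, the allocator skips ahead by rounding the cursor up to the next bitmap granule of 1 << btree_bitmap_shift sectors. A worked sketch of that cursor arithmetic with example numbers:

/* Worked sketch of the cursor advance above: skip to the start of the
 * next btree-bitmap granule. Example values, not real device geometry. */
#include <stdio.h>
#include <stdint.h>

static uint64_t round_up_pow2(uint64_t v, uint64_t granule)
{
	return (v + granule - 1) & ~(granule - 1);
}

int main(void)
{
	uint64_t bucket_size = 128;	/* sectors per bucket (example) */
	uint64_t shift = 10;		/* 1024-sector bitmap granules */
	uint64_t bucket = 5;		/* current cursor, in buckets */

	uint64_t sector = bucket * bucket_size;			 /* 640 */
	uint64_t next = round_up_pow2(sector + 1, 1ULL << shift);/* 1024 */

	printf("skip from bucket %llu to bucket %llu\n",
	       (unsigned long long)bucket,
	       (unsigned long long)(next / bucket_size));	/* 5 -> 8 */
	return 0;
}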
@@ -494,10 +535,9 @@ again:
if (ob || ret)
break;
}
+fail:
bch2_trans_iter_exit(trans, &iter);
- ca->alloc_cursor = alloc_cursor;
-
if (!ob && ret)
ob = ERR_PTR(ret);
@@ -506,14 +546,56 @@ again:
goto again;
}
+ *dev_alloc_cursor = alloc_cursor;
+
return ob;
}
+static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
+ enum bch_watermark watermark,
+ enum bch_data_type data_type,
+ struct closure *cl,
+ struct bch_dev_usage *usage,
+ struct bucket_alloc_state *s,
+ struct open_bucket *ob)
+{
+ struct printbuf buf = PRINTBUF;
+
+ printbuf_tabstop_push(&buf, 24);
+
+ prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx);
+ prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
+ prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
+ prt_printf(&buf, "blocking\t%u\n", cl != NULL);
+ prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
+ prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
+ prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
+ bch2_copygc_wait_amount(c),
+ c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
+ prt_printf(&buf, "seen\t%llu\n", s->buckets_seen);
+ prt_printf(&buf, "open\t%llu\n", s->skipped_open);
+ prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
+ prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow);
+ prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse);
+ prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);
+
+ if (!IS_ERR(ob)) {
+ prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
+ trace_bucket_alloc(c, buf.buf);
+ } else {
+ prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
+ trace_bucket_alloc_fail(c, buf.buf);
+ }
+
+ printbuf_exit(&buf);
+}
+
/**
* bch2_bucket_alloc_trans - allocate a single bucket from a specific device
* @trans: transaction object
* @ca: device to allocate from
* @watermark: how important is this allocation?
+ * @data_type: BCH_DATA_journal, btree, user...
* @cl: if not NULL, closure to be used to wait if buckets not available
* @usage: for secondarily also returning the current device usage
*
@@ -522,6 +604,7 @@ again:
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
struct bch_dev *ca,
enum bch_watermark watermark,
+ enum bch_data_type data_type,
struct closure *cl,
struct bch_dev_usage *usage)
{
@@ -529,7 +612,9 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
struct open_bucket *ob = NULL;
bool freespace = READ_ONCE(ca->mi.freespace_initialized);
u64 avail;
- struct bucket_alloc_state s = { 0 };
+ struct bucket_alloc_state s = {
+ .btree_bitmap = data_type == BCH_DATA_btree,
+ };
bool waiting = false;
again:
bch2_dev_usage_read_fast(ca, usage);
@@ -539,7 +624,7 @@ again:
bch2_do_discards(c);
if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
- bch2_do_gc_gens(c);
+ bch2_gc_gens_async(c);
if (should_invalidate_buckets(ca, *usage))
bch2_do_invalidates(c);
@@ -567,6 +652,11 @@ alloc:
if (s.skipped_need_journal_commit * 2 > avail)
bch2_journal_flush_async(&c->journal, NULL);
+ if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
+ s.btree_bitmap = BTREE_BITMAP_ANY;
+ goto alloc;
+ }
+
if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
freespace = false;
goto alloc;
@@ -576,33 +666,24 @@ err:
ob = ERR_PTR(-BCH_ERR_no_buckets_found);
if (!IS_ERR(ob))
- trace_and_count(c, bucket_alloc, ca,
- bch2_watermarks[watermark],
- ob->bucket,
- usage->d[BCH_DATA_free].buckets,
- avail,
- bch2_copygc_wait_amount(c),
- c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
- &s,
- cl == NULL,
- "");
+ ob->data_type = data_type;
+
+ if (!IS_ERR(ob))
+ count_event(c, bucket_alloc);
else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
- trace_and_count(c, bucket_alloc_fail, ca,
- bch2_watermarks[watermark],
- 0,
- usage->d[BCH_DATA_free].buckets,
- avail,
- bch2_copygc_wait_amount(c),
- c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
- &s,
- cl == NULL,
- bch2_err_str(PTR_ERR(ob)));
+ count_event(c, bucket_alloc_fail);
+
+ if (!IS_ERR(ob)
+ ? trace_bucket_alloc_enabled()
+ : trace_bucket_alloc_fail_enabled())
+ trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);
return ob;
}
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
enum bch_watermark watermark,
+ enum bch_data_type data_type,
struct closure *cl)
{
struct bch_dev_usage usage;
@@ -610,7 +691,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
bch2_trans_do(c, NULL, NULL, 0,
PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
- cl, &usage)));
+ data_type, cl, &usage)));
return ob;
}
@@ -676,8 +757,7 @@ static int add_new_bucket(struct bch_fs *c,
unsigned flags,
struct open_bucket *ob)
{
- unsigned durability =
- bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+ unsigned durability = ob_dev(c, ob)->mi.durability;
BUG_ON(*nr_effective >= nr_replicas);
@@ -709,37 +789,28 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct dev_alloc_list devs_sorted =
bch2_dev_alloc_list(c, stripe, devs_may_alloc);
- unsigned dev;
- struct bch_dev *ca;
int ret = -BCH_ERR_insufficient_devices;
- unsigned i;
BUG_ON(*nr_effective >= nr_replicas);
- for (i = 0; i < devs_sorted.nr; i++) {
+ for (unsigned i = 0; i < devs_sorted.nr; i++) {
struct bch_dev_usage usage;
struct open_bucket *ob;
- dev = devs_sorted.devs[i];
-
- rcu_read_lock();
- ca = rcu_dereference(c->devs[dev]);
- if (ca)
- percpu_ref_get(&ca->ref);
- rcu_read_unlock();
-
+ unsigned dev = devs_sorted.devs[i];
+ struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
if (!ca)
continue;
if (!ca->mi.durability && *have_cache) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
continue;
}
- ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
+ ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
if (!IS_ERR(ob))
bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
if (IS_ERR(ob)) {
ret = PTR_ERR(ob);
@@ -748,8 +819,6 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
continue;
}
- ob->data_type = data_type;
-
if (add_new_bucket(c, ptrs, devs_may_alloc,
nr_replicas, nr_effective,
have_cache, flags, ob)) {
@@ -834,7 +903,7 @@ static bool want_bucket(struct bch_fs *c,
bool *have_cache, bool ec,
struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_dev *ca = ob_dev(c, ob);
if (!test_bit(ob->dev, devs_may_alloc->d))
return false;
@@ -904,7 +973,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_dev *ca = ob_dev(c, ob);
struct bch_dev_usage usage;
u64 avail;
@@ -1289,7 +1358,7 @@ deallocate_extra_replicas(struct bch_fs *c,
unsigned i;
open_bucket_for_each(c, ptrs, ob, i) {
- unsigned d = bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+ unsigned d = ob_dev(c, ob)->mi.durability;
if (d && d <= extra_replicas) {
extra_replicas -= d;
@@ -1340,6 +1409,10 @@ retry:
*wp_ret = wp = writepoint_find(trans, write_point.v);
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ goto err;
+
/* metadata may not allocate on cache devices: */
if (wp->data_type != BCH_DATA_user)
have_cache = true;
@@ -1442,7 +1515,7 @@ err:
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_dev *ca = ob_dev(c, ob);
return (struct bch_extent_ptr) {
.type = 1 << BCH_EXTENT_ENTRY_ptr,
@@ -1518,7 +1591,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_dev *ca = ob_dev(c, ob);
unsigned data_type = ob->data_type;
barrier(); /* READ_ONCE() doesn't work on bitfields */
@@ -1620,3 +1693,104 @@ void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
prt_str(out, "Btree write point\n");
bch2_write_point_to_text(out, c, &c->btree_write_point);
}
+
+void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ unsigned nr[BCH_DATA_NR];
+
+ memset(nr, 0, sizeof(nr));
+
+ for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
+ nr[c->open_buckets[i].data_type]++;
+
+ printbuf_tabstop_push(out, 24);
+
+ percpu_down_read(&c->mark_lock);
+ prt_printf(out, "hidden\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.hidden));
+ prt_printf(out, "btree\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.btree));
+ prt_printf(out, "data\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.data));
+ prt_printf(out, "cached\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.cached));
+ prt_printf(out, "reserved\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.reserved));
+ prt_printf(out, "online_reserved\t%llu\n", percpu_u64_get(c->online_reserved));
+ prt_printf(out, "nr_inodes\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes));
+ percpu_up_read(&c->mark_lock);
+
+ prt_newline(out);
+ prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty");
+ prt_printf(out, "open buckets allocated\t%i\n", OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
+ prt_printf(out, "open buckets total\t%u\n", OPEN_BUCKETS_COUNT);
+ prt_printf(out, "open_buckets_wait\t%s\n", c->open_buckets_wait.list.first ? "waiting" : "empty");
+ prt_printf(out, "open_buckets_btree\t%u\n", nr[BCH_DATA_btree]);
+ prt_printf(out, "open_buckets_user\t%u\n", nr[BCH_DATA_user]);
+ prt_printf(out, "btree reserve cache\t%u\n", c->btree_reserve_cache_nr);
+}
+
+void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
+{
+ struct bch_fs *c = ca->fs;
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
+ unsigned nr[BCH_DATA_NR];
+
+ memset(nr, 0, sizeof(nr));
+
+ for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
+ nr[c->open_buckets[i].data_type]++;
+
+ printbuf_tabstop_push(out, 12);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+ printbuf_tabstop_push(out, 16);
+
+ bch2_dev_usage_to_text(out, &stats);
+
+ prt_newline(out);
+
+ prt_printf(out, "reserves:\n");
+ for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
+ prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));
+
+ prt_newline(out);
+
+ printbuf_tabstops_reset(out);
+ printbuf_tabstop_push(out, 12);
+ printbuf_tabstop_push(out, 16);
+
+ prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
+ prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
+}
+
+void bch2_print_allocator_stuck(struct bch_fs *c)
+{
+ struct printbuf buf = PRINTBUF;
+
+ prt_printf(&buf, "Allocator stuck? Waited for 10 seconds\n");
+
+ prt_printf(&buf, "Allocator debug:\n");
+ printbuf_indent_add(&buf, 2);
+ bch2_fs_alloc_debug_to_text(&buf, c);
+ printbuf_indent_sub(&buf, 2);
+ prt_newline(&buf);
+
+ for_each_online_member(c, ca) {
+ prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
+ printbuf_indent_add(&buf, 2);
+ bch2_dev_alloc_debug_to_text(&buf, ca);
+ printbuf_indent_sub(&buf, 2);
+ prt_newline(&buf);
+ }
+
+ prt_printf(&buf, "Copygc debug:\n");
+ printbuf_indent_add(&buf, 2);
+ bch2_copygc_wait_to_text(&buf, c);
+ printbuf_indent_sub(&buf, 2);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "Journal debug:\n");
+ printbuf_indent_add(&buf, 2);
+ bch2_journal_debug_to_text(&buf, &c->journal);
+ printbuf_indent_sub(&buf, 2);
+
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
+}
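trace_bucket_alloc2() above is noinline and only invoked when trace_bucket_alloc_enabled()/trace_bucket_alloc_fail_enabled() report a listener, so the printbuf formatting cost stays off the hot path. A sketch of that format-only-if-enabled pattern, with a plain flag modeling the tracepoint's static key:

/* Sketch of "format only if someone is listening"; a flag stands in
 * for the tracepoint static key. Illustrative only. */
#include <stdio.h>
#include <stdbool.h>

static bool trace_enabled;	/* models trace_bucket_alloc_enabled() */

static void trace_slowpath(int bucket)	/* models the noinline helper */
{
	char buf[64];

	/* expensive formatting lives only in the slow path: */
	snprintf(buf, sizeof(buf), "allocated\t%d\n", bucket);
	fputs(buf, stderr);
}

static void alloc_one(int bucket)
{
	/* hot path: a single predictable branch when tracing is off */
	if (trace_enabled)
		trace_slowpath(bucket);
}

int main(void)
{
	alloc_one(7);		/* silent */
	trace_enabled = true;
	alloc_one(8);		/* emits one line */
	return 0;
}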
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
index 7aaeec44c746..a42c9730d32a 100644
--- a/fs/bcachefs/alloc_foreground.h
+++ b/fs/bcachefs/alloc_foreground.h
@@ -30,8 +30,14 @@ void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
long bch2_bucket_alloc_new_fs(struct bch_dev *);
+static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob)
+{
+ return bch2_dev_have_ref(c, ob->dev);
+}
+
struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
- enum bch_watermark, struct closure *);
+ enum bch_watermark, enum bch_data_type,
+ struct closure *);
static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
struct open_bucket *ob)
@@ -184,7 +190,7 @@ bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
wp->sectors_allocated += sectors;
open_bucket_for_each(c, &wp->ptrs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_dev *ca = ob_dev(c, ob);
struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
ptr.cached = cached ||
@@ -221,4 +227,9 @@ void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *);
void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);
+void bch2_fs_alloc_debug_to_text(struct printbuf *, struct bch_fs *);
+void bch2_dev_alloc_debug_to_text(struct printbuf *, struct bch_dev *);
+
+void bch2_print_allocator_stuck(struct bch_fs *);
+
#endif /* _BCACHEFS_ALLOC_FOREGROUND_H */
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index b91b7a461056..9bbb28e90b93 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -9,11 +9,18 @@
#include "fifo.h"
struct bucket_alloc_state {
+ enum {
+ BTREE_BITMAP_NO,
+ BTREE_BITMAP_YES,
+ BTREE_BITMAP_ANY,
+ } btree_bitmap;
+
u64 buckets_seen;
u64 skipped_open;
u64 skipped_need_journal_commit;
u64 skipped_nocow;
u64 skipped_nouse;
+ u64 skipped_mi_btree_bitmap;
};
#define BCH_WATERMARKS() \
@@ -22,7 +29,8 @@ struct bucket_alloc_state {
x(copygc) \
x(btree) \
x(btree_copygc) \
- x(reclaim)
+ x(reclaim) \
+ x(interior_updates)
enum bch_watermark {
#define x(name) BCH_WATERMARK_##name,
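
The btree_bitmap tri-state lets an allocation request require, forbid, or ignore buckets covered by the member btree-allocated bitmap, with skipped_mi_btree_bitmap counting rejects. A hedged sketch of how such a gate might look; bucket_is_btree and the struct here are illustrative stand-ins, not the actual filter, which lives outside this hunk:

#include <assert.h>

enum { BITMAP_NO, BITMAP_YES, BITMAP_ANY };

struct alloc_state_sketch {
	int btree_bitmap;
	unsigned long long skipped_mi_btree_bitmap;
};

/* bucket_is_btree (0 or 1) is a hypothetical stand-in for the per-member
 * bitmap test */
static int bucket_ok(struct alloc_state_sketch *s, int bucket_is_btree)
{
	if (s->btree_bitmap != BITMAP_ANY &&
	    (s->btree_bitmap == BITMAP_YES) != bucket_is_btree) {
		s->skipped_mi_btree_bitmap++;
		return 0;
	}
	return 1;
}

int main(void)
{
	struct alloc_state_sketch s = { .btree_bitmap = BITMAP_YES };

	assert(bucket_ok(&s, 1));	/* btree bucket wanted, got one */
	assert(!bucket_ok(&s, 0));	/* non-btree bucket skipped */
	assert(s.skipped_mi_btree_bitmap == 1);
	return 0;
}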
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 8cb35ea572cb..692b1c7d5018 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -8,6 +8,7 @@
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
+#include "checksum.h"
#include "error.h"
#include <linux/mm.h>
@@ -22,6 +23,7 @@ static bool extent_matches_bp(struct bch_fs *c,
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
+ rcu_read_lock();
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
struct bpos bucket2;
struct bch_backpointer bp2;
@@ -29,28 +31,46 @@ static bool extent_matches_bp(struct bch_fs *c,
if (p.ptr.cached)
continue;
- bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
- &bucket2, &bp2);
+ struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+ if (!ca)
+ continue;
+
+ bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, &bucket2, &bp2);
if (bpos_eq(bucket, bucket2) &&
- !memcmp(&bp, &bp2, sizeof(bp)))
+ !memcmp(&bp, &bp2, sizeof(bp))) {
+ rcu_read_unlock();
return true;
+ }
}
+ rcu_read_unlock();
return false;
}
int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
- struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
+
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, bp.k->p.inode);
+ if (!ca) {
+ /* these will be caught by fsck */
+ rcu_read_unlock();
+ return 0;
+ }
+
+ struct bpos bucket = bp_pos_to_bucket(ca, bp.k->p);
+ struct bpos bp_pos = bucket_pos_to_bp_noerror(ca, bucket, bp.v->bucket_offset);
+ rcu_read_unlock();
int ret = 0;
- bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
+ bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size ||
+ !bpos_eq(bp.k->p, bp_pos),
c, err,
- backpointer_pos_wrong,
- "backpointer at wrong pos");
+ backpointer_bucket_offset_wrong,
+ "backpointer bucket_offset wrong");
fsck_err:
return ret;
}
@@ -68,10 +88,16 @@ void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer
void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
- if (bch2_dev_exists2(c, k.k->p.inode)) {
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, k.k->p.inode);
+ if (ca) {
+ struct bpos bucket = bp_pos_to_bucket(ca, k.k->p);
+ rcu_read_unlock();
prt_str(out, "bucket=");
- bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
+ bch2_bpos_to_text(out, bucket);
prt_str(out, " ");
+ } else {
+ rcu_read_unlock();
}
bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
@@ -110,8 +136,7 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
bch_err(c, "%s", buf.buf);
} else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
- prt_printf(&buf, "backpointer not found when deleting");
- prt_newline(&buf);
+ prt_printf(&buf, "backpointer not found when deleting\n");
printbuf_indent_add(&buf, 2);
prt_printf(&buf, "searching for ");
@@ -138,6 +163,7 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
}
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
+ struct bch_dev *ca,
struct bpos bucket,
struct bch_backpointer bp,
struct bkey_s_c orig_k,
@@ -154,7 +180,7 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
return ret;
bkey_backpointer_init(&bp_k->k_i);
- bp_k->k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
+ bp_k->k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
bp_k->v = bp;
if (!insert) {
@@ -164,9 +190,9 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
bp_k->k.p,
- BTREE_ITER_INTENT|
- BTREE_ITER_SLOTS|
- BTREE_ITER_WITH_UPDATES);
+ BTREE_ITER_intent|
+ BTREE_ITER_slots|
+ BTREE_ITER_with_updates);
ret = bkey_err(k);
if (ret)
goto err;
@@ -190,13 +216,13 @@ err:
 * Find the next backpointer >= *bp_pos:
*/
int bch2_get_next_backpointer(struct btree_trans *trans,
+ struct bch_dev *ca,
struct bpos bucket, int gen,
struct bpos *bp_pos,
struct bch_backpointer *bp,
unsigned iter_flags)
{
- struct bch_fs *c = trans->c;
- struct bpos bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
+ struct bpos bp_end_pos = bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket), 0);
struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
struct bkey_s_c k;
int ret = 0;
@@ -206,7 +232,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,
if (gen >= 0) {
k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
- bucket, BTREE_ITER_CACHED|iter_flags);
+ bucket, BTREE_ITER_cached|iter_flags);
ret = bkey_err(k);
if (ret)
goto out;
@@ -216,7 +242,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,
goto done;
}
- *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(c, bucket, 0));
+ *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(ca, bucket, 0));
for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
*bp_pos, iter_flags, k, ret) {
@@ -242,7 +268,6 @@ static void backpointer_not_found(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
- struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
/*
* If we're using the btree write buffer, the backpointer we were
@@ -252,6 +277,10 @@ static void backpointer_not_found(struct btree_trans *trans,
if (likely(!bch2_backpointers_no_use_write_buffer))
return;
+ struct bpos bucket;
+ if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket))
+ return;
+
prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
bp.level ? "btree node" : "extent");
prt_printf(&buf, "bucket: ");
@@ -281,15 +310,17 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
{
if (likely(!bp.level)) {
struct bch_fs *c = trans->c;
- struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
- struct bkey_s_c k;
+
+ struct bpos bucket;
+ if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket))
+ return bkey_s_c_err(-EIO);
bch2_trans_node_iter_init(trans, iter,
bp.btree_id,
bp.pos,
0, 0,
iter_flags);
- k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
if (bkey_err(k)) {
bch2_trans_iter_exit(trans, iter);
return k;
@@ -318,18 +349,20 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
struct bch_backpointer bp)
{
struct bch_fs *c = trans->c;
- struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
- struct btree *b;
BUG_ON(!bp.level);
+ struct bpos bucket;
+ if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket))
+ return ERR_PTR(-EIO);
+
bch2_trans_node_iter_init(trans, iter,
bp.btree_id,
bp.pos,
0,
bp.level - 1,
0);
- b = bch2_btree_iter_peek_node(iter);
+ struct btree *b = bch2_btree_iter_peek_node(iter);
if (IS_ERR_OR_NULL(b))
goto err;
@@ -360,16 +393,16 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
struct printbuf buf = PRINTBUF;
int ret = 0;
- if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
- backpointer_to_missing_device,
- "backpointer for missing device:\n%s",
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
- ret = bch2_btree_delete_at(trans, bp_iter, 0);
+ struct bpos bucket;
+ if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) {
+ if (fsck_err(c, backpointer_to_missing_device,
+ "backpointer for missing device:\n%s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ ret = bch2_btree_delete_at(trans, bp_iter, 0);
goto out;
}
- alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
- bp_pos_to_bucket(c, k.k->p), 0);
+ alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, bucket, 0);
ret = bkey_err(alloc_k);
if (ret)
goto out;
@@ -378,7 +411,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
backpointer_to_missing_alloc,
"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
alloc_iter.pos.inode, alloc_iter.pos.offset,
- (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, bp_iter, 0);
goto out;
}
@@ -414,6 +447,84 @@ struct extents_to_bp_state {
struct bkey_buf last_flushed;
};
+static int drop_dev_and_update(struct btree_trans *trans, enum btree_id btree,
+ struct bkey_s_c extent, unsigned dev)
+{
+ struct bkey_i *n = bch2_bkey_make_mut_noupdate(trans, extent);
+ int ret = PTR_ERR_OR_ZERO(n);
+ if (ret)
+ return ret;
+
+ bch2_bkey_drop_device(bkey_i_to_s(n), dev);
+ return bch2_btree_insert_trans(trans, btree, n, 0);
+}
+
+static int check_extent_checksum(struct btree_trans *trans,
+ enum btree_id btree, struct bkey_s_c extent,
+ enum btree_id o_btree, struct bkey_s_c extent2, unsigned dev)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(extent);
+ const union bch_extent_entry *entry;
+ struct extent_ptr_decoded p;
+ struct printbuf buf = PRINTBUF;
+ void *data_buf = NULL;
+ struct bio *bio = NULL;
+ size_t bytes;
+ int ret = 0;
+
+ if (bkey_is_btree_ptr(extent.k))
+ return false;
+
+ bkey_for_each_ptr_decode(extent.k, ptrs, p, entry)
+ if (p.ptr.dev == dev)
+ goto found;
+ BUG();
+found:
+ if (!p.crc.csum_type)
+ return false;
+
+ bytes = p.crc.compressed_size << 9;
+
+ struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ);
+ if (!ca)
+ return false;
+
+ data_buf = kvmalloc(bytes, GFP_KERNEL);
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ bio = bio_alloc(ca->disk_sb.bdev, buf_pages(data_buf, bytes), REQ_OP_READ, GFP_KERNEL);
+ bio->bi_iter.bi_sector = p.ptr.offset;
+ bch2_bio_map(bio, data_buf, bytes);
+ ret = submit_bio_wait(bio);
+ if (ret)
+ goto err;
+
+ prt_str(&buf, "extents pointing to same space, but first extent checksum bad:");
+ prt_printf(&buf, "\n %s ", bch2_btree_id_str(btree));
+ bch2_bkey_val_to_text(&buf, c, extent);
+ prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree));
+ bch2_bkey_val_to_text(&buf, c, extent2);
+
+ struct nonce nonce = extent_nonce(extent.k->version, p.crc);
+ struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes);
+ if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum),
+ c, dup_backpointer_to_bad_csum_extent,
+ "%s", buf.buf))
+ ret = drop_dev_and_update(trans, btree, extent, dev) ?: 1;
+fsck_err:
+err:
+ if (bio)
+ bio_put(bio);
+ kvfree(data_buf);
+ percpu_ref_put(&ca->io_ref);
+ printbuf_exit(&buf);
+ return ret;
+}
+
static int check_bp_exists(struct btree_trans *trans,
struct extents_to_bp_state *s,
struct bpos bucket,
@@ -421,23 +532,32 @@ static int check_bp_exists(struct btree_trans *trans,
struct bkey_s_c orig_k)
{
struct bch_fs *c = trans->c;
- struct btree_iter bp_iter = { NULL };
+ struct btree_iter bp_iter = {};
+ struct btree_iter other_extent_iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c bp_k;
struct bkey_buf tmp;
- int ret;
+ int ret = 0;
bch2_bkey_buf_init(&tmp);
+ struct bch_dev *ca = bch2_dev_bucket_tryget(c, bucket);
+ if (!ca) {
+ prt_str(&buf, "extent for nonexistent device:bucket ");
+ bch2_bpos_to_text(&buf, bucket);
+ prt_str(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+ bch_err(c, "%s", buf.buf);
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ goto err;
+ }
+
if (bpos_lt(bucket, s->bucket_start) ||
bpos_gt(bucket, s->bucket_end))
- return 0;
-
- if (!bch2_dev_bucket_exists(c, bucket))
- goto missing;
+ goto out;
bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
- bucket_pos_to_bp(c, bucket, bp.bucket_offset),
+ bucket_pos_to_bp(ca, bucket, bp.bucket_offset),
0);
ret = bkey_err(bp_k);
if (ret)
@@ -461,24 +581,98 @@ static int check_bp_exists(struct btree_trans *trans,
ret = -BCH_ERR_transaction_restart_write_buffer_flush;
goto out;
}
- goto missing;
+
+ goto check_existing_bp;
}
out:
err:
fsck_err:
+ bch2_trans_iter_exit(trans, &other_extent_iter);
bch2_trans_iter_exit(trans, &bp_iter);
bch2_bkey_buf_exit(&tmp, c);
+ bch2_dev_put(ca);
printbuf_exit(&buf);
return ret;
+check_existing_bp:
+ /* Do we have a backpointer for a different extent? */
+ if (bp_k.k->type != KEY_TYPE_backpointer)
+ goto missing;
+
+ struct bch_backpointer other_bp = *bkey_s_c_to_backpointer(bp_k).v;
+
+ struct bkey_s_c other_extent =
+ bch2_backpointer_get_key(trans, &other_extent_iter, bp_k.k->p, other_bp, 0);
+ ret = bkey_err(other_extent);
+ if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
+ ret = 0;
+ if (ret)
+ goto err;
+
+ if (!other_extent.k)
+ goto missing;
+
+ if (bch2_extents_match(orig_k, other_extent)) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n ");
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+ prt_str(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, other_extent);
+ bch_err(c, "%s", buf.buf);
+
+ if (other_extent.k->size <= orig_k.k->size) {
+ ret = drop_dev_and_update(trans, other_bp.btree_id, other_extent, bucket.inode);
+ if (ret)
+ goto err;
+ goto out;
+ } else {
+ ret = drop_dev_and_update(trans, bp.btree_id, orig_k, bucket.inode);
+ if (ret)
+ goto err;
+ goto missing;
+ }
+ }
+
+ ret = check_extent_checksum(trans, other_bp.btree_id, other_extent, bp.btree_id, orig_k, bucket.inode);
+ if (ret < 0)
+ goto err;
+ if (ret) {
+ ret = 0;
+ goto missing;
+ }
+
+ ret = check_extent_checksum(trans, bp.btree_id, orig_k, other_bp.btree_id, other_extent, bucket.inode);
+ if (ret < 0)
+ goto err;
+ if (ret) {
+ ret = 0;
+ goto out;
+ }
+
+ printbuf_reset(&buf);
+ prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bucket.inode);
+ bch2_bkey_val_to_text(&buf, c, orig_k);
+ prt_str(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, other_extent);
+ bch_err(c, "%s", buf.buf);
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ goto err;
missing:
+ printbuf_reset(&buf);
prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
bch2_btree_id_str(bp.btree_id), bp.level);
bch2_bkey_val_to_text(&buf, c, orig_k);
- prt_printf(&buf, "\nbp pos ");
- bch2_bpos_to_text(&buf, bp_iter.pos);
+ prt_printf(&buf, "\n got: ");
+ bch2_bkey_val_to_text(&buf, c, bp_k);
+
+ struct bkey_i_backpointer n_bp_k;
+ bkey_backpointer_init(&n_bp_k.k_i);
+ n_bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
+ n_bp_k.v = bp;
+ prt_printf(&buf, "\n want: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&n_bp_k.k_i));
if (fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf))
- ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
+ ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, orig_k, true);
goto out;
}
@@ -502,8 +696,14 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
if (p.ptr.cached)
continue;
- bch2_extent_ptr_to_bp(c, btree, level,
- k, p, &bucket_pos, &bp);
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+ if (ca)
+ bch2_extent_ptr_to_bp(c, ca, btree, level, k, p, entry, &bucket_pos, &bp);
+ rcu_read_unlock();
+
+ if (!ca)
+ continue;
ret = check_bp_exists(trans, s, bucket_pos, bp, k);
if (ret)
@@ -596,7 +796,7 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
__for_each_btree_node(trans, iter, btree,
btree == start.btree ? start.pos : POS_MIN,
- 0, depth, BTREE_ITER_PREFETCH, b, ret) {
+ 0, depth, BTREE_ITER_prefetch, b, ret) {
mem_may_pin -= btree_buf_bytes(b);
if (mem_may_pin <= 0) {
c->btree_cache.pinned_nodes_end = *end =
@@ -630,31 +830,13 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
while (level >= depth) {
struct btree_iter iter;
- bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
- level,
- BTREE_ITER_PREFETCH);
- while (1) {
- bch2_trans_begin(trans);
-
- struct bkey_s_c k = bch2_btree_iter_peek(&iter);
- if (!k.k)
- break;
- ret = bkey_err(k) ?:
- check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
- bch2_trans_commit(trans, NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc);
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
- ret = 0;
- continue;
- }
- if (ret)
- break;
- if (bpos_eq(iter.pos, SPOS_MAX))
- break;
- bch2_btree_iter_advance(&iter);
- }
- bch2_trans_iter_exit(trans, &iter);
+ bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, level,
+ BTREE_ITER_prefetch);
+ ret = for_each_btree_key_continue(trans, iter, 0, k, ({
+ check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
+ bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+ }));
if (ret)
return ret;
@@ -772,7 +954,7 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
struct bpos last_flushed_pos = SPOS_MAX;
return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
- POS_MIN, BTREE_ITER_PREFETCH, k,
+ POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_one_backpointer(trans, start, end,
bkey_s_c_to_backpointer(k),
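
check_bp_exists() above resolves a backpointer that already points at a different extent in a fixed order: exact duplicates delete the smaller copy, then each extent's checksum is verified against data read from disk, and only if both survive does repair give up. A compressed sketch of that decision order, with hypothetical flag parameters standing in for the real comparisons (on equal sizes the pre-existing extent is the one dropped):

#include <stdio.h>

enum resolution { DROP_OTHER, DROP_ORIG, GIVE_UP };

static enum resolution resolve_dup(int extents_match, int other_is_smaller,
				   int other_csum_bad, int orig_csum_bad)
{
	if (extents_match)	/* same data twice: delete the smaller copy */
		return other_is_smaller ? DROP_OTHER : DROP_ORIG;
	if (other_csum_bad)	/* the pre-existing extent fails its checksum */
		return DROP_OTHER;
	if (orig_csum_bad)	/* our extent fails its checksum */
		return DROP_ORIG;
	return GIVE_UP;		/* both valid: repair is unimplemented */
}

int main(void)
{
	printf("%d\n", resolve_dup(0, 0, 1, 0));	/* DROP_OTHER */
	return 0;
}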
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index 327365a9feac..6021de1c5e98 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -6,6 +6,7 @@
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets.h"
+#include "error.h"
#include "super.h"
static inline u64 swab40(u64 x)
@@ -18,7 +19,7 @@ static inline u64 swab40(u64 x)
}
int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);
@@ -36,50 +37,67 @@ void bch2_backpointer_swab(struct bkey_s);
* Convert from pos in backpointer btree to pos of corresponding bucket in alloc
* btree:
*/
-static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
- struct bpos bp_pos)
+static inline struct bpos bp_pos_to_bucket(const struct bch_dev *ca, struct bpos bp_pos)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}
+static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
+{
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, bp_pos.inode);
+ if (ca)
+ *bucket = bp_pos_to_bucket(ca, bp_pos);
+ rcu_read_unlock();
+ return ca != NULL;
+}
+
+static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket)
+{
+ return !bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket),
+ c, "backpointer for missing device %llu", bp_pos.inode);
+}
+
+static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca,
+ struct bpos bucket,
+ u64 bucket_offset)
+{
+ return POS(bucket.inode,
+ (bucket_to_sector(ca, bucket.offset) <<
+ MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
+}
+
/*
* Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
*/
-static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
+static inline struct bpos bucket_pos_to_bp(const struct bch_dev *ca,
struct bpos bucket,
u64 bucket_offset)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
- struct bpos ret;
-
- ret = POS(bucket.inode,
- (bucket_to_sector(ca, bucket.offset) <<
- MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
-
- EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
-
+ struct bpos ret = bucket_pos_to_bp_noerror(ca, bucket, bucket_offset);
+ EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(ca, ret)));
return ret;
}
-int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bpos bucket,
- struct bch_backpointer, struct bkey_s_c, bool);
+int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bch_dev *,
+ struct bpos bucket, struct bch_backpointer, struct bkey_s_c, bool);
static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
+ struct bch_dev *ca,
struct bpos bucket,
struct bch_backpointer bp,
struct bkey_s_c orig_k,
bool insert)
{
if (unlikely(bch2_backpointers_no_use_write_buffer))
- return bch2_bucket_backpointer_mod_nowritebuffer(trans, bucket, bp, orig_k, insert);
+ return bch2_bucket_backpointer_mod_nowritebuffer(trans, ca, bucket, bp, orig_k, insert);
struct bkey_i_backpointer bp_k;
bkey_backpointer_init(&bp_k.k_i);
- bp_k.k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
+ bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset);
bp_k.v = bp;
if (!insert) {
@@ -90,24 +108,44 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i);
}
-static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, struct extent_ptr_decoded p)
+static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
+ struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry)
{
- return level ? BCH_DATA_btree :
- p.has_ec ? BCH_DATA_stripe :
- BCH_DATA_user;
+ switch (k.k->type) {
+ case KEY_TYPE_btree_ptr:
+ case KEY_TYPE_btree_ptr_v2:
+ return BCH_DATA_btree;
+ case KEY_TYPE_extent:
+ case KEY_TYPE_reflink_v:
+ return p.has_ec ? BCH_DATA_stripe : BCH_DATA_user;
+ case KEY_TYPE_stripe: {
+ const struct bch_extent_ptr *ptr = &entry->ptr;
+ struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
+
+ BUG_ON(ptr < s.v->ptrs ||
+ ptr >= s.v->ptrs + s.v->nr_blocks);
+
+ return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
+ ? BCH_DATA_parity
+ : BCH_DATA_user;
+ }
+ default:
+ BUG();
+ }
}
-static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
+static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry,
struct bpos *bucket_pos, struct bch_backpointer *bp)
{
- enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
s64 sectors = level ? btree_sectors(c) : k.k->size;
u32 bucket_offset;
- *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
+ *bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
*bp = (struct bch_backpointer) {
.btree_id = btree_id,
.level = level,
@@ -119,7 +157,7 @@ static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
};
}
-int bch2_get_next_backpointer(struct btree_trans *, struct bpos, int,
+int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int,
struct bpos *, struct bch_backpointer *, unsigned);
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *,
struct bpos, struct bch_backpointer,
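
The bucket/backpointer position conversion above is pure arithmetic: the backpointer offset is the bucket's starting sector shifted up by MAX_EXTENT_COMPRESS_RATIO_SHIFT, plus an offset within the bucket. A self-contained round-trip check, assuming an illustrative geometry (512-sector buckets, shift of 4 — not the real constants):

#include <assert.h>

#define SHIFT 4			/* stand-in for MAX_EXTENT_COMPRESS_RATIO_SHIFT */
#define BUCKET_SECTORS 512ULL

static unsigned long long bucket_to_bp_offset(unsigned long long bucket,
					      unsigned long long bucket_offset)
{
	return ((bucket * BUCKET_SECTORS) << SHIFT) + bucket_offset;
}

static unsigned long long bp_offset_to_bucket(unsigned long long bp_offset)
{
	return (bp_offset >> SHIFT) / BUCKET_SECTORS;
}

int main(void)
{
	unsigned long long bp = bucket_to_bp_offset(7, 123);

	/* bucket_offset must stay below bucket_size << SHIFT for the round
	 * trip to land back in bucket 7 — the invalid check above enforces
	 * exactly this bound: */
	assert(bp_offset_to_bucket(bp) == 7);
	return 0;
}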
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 799aa32b6b4d..bc0ea2c4efef 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -209,7 +209,7 @@
#include "fifo.h"
#include "nocow_locking_types.h"
#include "opts.h"
-#include "recovery_types.h"
+#include "recovery_passes_types.h"
#include "sb-errors_types.h"
#include "seqmutex.h"
#include "time_stats.h"
@@ -359,6 +359,8 @@ do { \
#define BCH_DEBUG_PARAMS_ALWAYS() \
BCH_DEBUG_PARAM(key_merging_disabled, \
"Disables merging of extents") \
+ BCH_DEBUG_PARAM(btree_node_merging_disabled, \
+ "Disables merging of btree nodes") \
BCH_DEBUG_PARAM(btree_gc_always_rewrite, \
"Causes mark and sweep to compact and rewrite every " \
"btree node it traverses") \
@@ -456,6 +458,7 @@ enum bch_time_stats {
#include "alloc_types.h"
#include "btree_types.h"
+#include "btree_node_scan_types.h"
#include "btree_write_buffer_types.h"
#include "buckets_types.h"
#include "buckets_waiting_for_journal_types.h"
@@ -467,6 +470,7 @@ enum bch_time_stats {
#include "quota_types.h"
#include "rebalance_types.h"
#include "replicas_types.h"
+#include "sb-members_types.h"
#include "subvolume_types.h"
#include "super_types.h"
#include "thread_with_file_types.h"
@@ -515,8 +519,8 @@ enum gc_phase {
struct gc_pos {
enum gc_phase phase;
+ u16 level;
struct bpos pos;
- unsigned level;
};
struct reflink_gc {
@@ -533,7 +537,13 @@ struct io_count {
struct bch_dev {
struct kobject kobj;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ atomic_long_t ref;
+ bool dying;
+ unsigned long last_put;
+#else
struct percpu_ref ref;
+#endif
struct completion ref_completion;
struct percpu_ref io_ref;
struct completion io_ref_completion;
@@ -559,14 +569,11 @@ struct bch_dev {
struct bch_devs_mask self;
- /* biosets used in cloned bios for writing multiple replicas */
- struct bio_set replica_set;
-
/*
* Buckets:
* Per-bucket arrays are protected by c->mark_lock, bucket_lock and
* gc_lock, for device resize - holding any is sufficient for access:
- * Or rcu_read_lock(), but only for ptr_stale():
+ * Or rcu_read_lock(), but only for dev_ptr_stale():
*/
struct bucket_array __rcu *buckets_gc;
struct bucket_gens __rcu *bucket_gens;
@@ -580,7 +587,7 @@ struct bch_dev {
/* Allocator: */
u64 new_fs_bucket_idx;
- u64 alloc_cursor;
+ u64 alloc_cursor[3];
unsigned nr_open_buckets;
unsigned nr_btree_reserve;
@@ -614,6 +621,7 @@ struct bch_dev {
*/
#define BCH_FS_FLAGS() \
+ x(new_fs) \
x(started) \
x(may_go_rw) \
x(rw) \
@@ -625,12 +633,12 @@ struct bch_dev {
x(clean_shutdown) \
x(fsck_running) \
x(initial_gc_unfixed) \
- x(need_another_gc) \
x(need_delete_dead_snapshots) \
x(error) \
x(topology_error) \
x(errors_fixed) \
- x(errors_not_fixed)
+ x(errors_not_fixed) \
+ x(no_invalid_checks)
enum bch_fs_flags {
#define x(n) BCH_FS_##n,
@@ -707,10 +715,13 @@ struct btree_trans_buf {
x(stripe_delete) \
x(reflink) \
x(fallocate) \
+ x(fsync) \
+ x(dio_write) \
x(discard) \
x(discard_fast) \
x(invalidate) \
x(delete_dead_snapshots) \
+ x(gc_gens) \
x(snapshot_delete_pagecache) \
x(sysfs) \
x(btree_write_buffer)
@@ -796,6 +807,7 @@ struct bch_fs {
u64 features;
u64 compat;
unsigned long errors_silent[BITS_TO_LONGS(BCH_SB_ERR_MAX)];
+ u64 btrees_lost_data;
} sb;
@@ -810,7 +822,6 @@ struct bch_fs {
/* snapshot.c: */
struct snapshot_table __rcu *snapshots;
- size_t snapshot_table_size;
struct mutex snapshot_table_lock;
struct rw_semaphore snapshot_create_lock;
@@ -922,7 +933,6 @@ struct bch_fs {
/* JOURNAL SEQ BLACKLIST */
struct journal_seq_blacklist_table *
journal_seq_blacklist_table;
- struct work_struct journal_seq_blacklist_gc_work;
/* ALLOCATOR */
spinlock_t freelist_lock;
@@ -953,8 +963,7 @@ struct bch_fs {
struct work_struct discard_fast_work;
/* GARBAGE COLLECTION */
- struct task_struct *gc_thread;
- atomic_t kick_gc;
+ struct work_struct gc_gens_work;
unsigned long gc_count;
enum btree_id gc_gens_btree;
@@ -984,6 +993,7 @@ struct bch_fs {
struct bio_set bio_read;
struct bio_set bio_read_split;
struct bio_set bio_write;
+ struct bio_set replica_set;
struct mutex bio_bounce_pages_lock;
mempool_t bio_bounce_pages;
struct bucket_nocow_lock_table
@@ -1104,12 +1114,13 @@ struct bch_fs {
struct journal_keys journal_keys;
struct list_head journal_iters;
+ struct find_btree_nodes found_btree_nodes;
+
u64 last_bucket_seq_cleanup;
u64 counters_on_mount[BCH_COUNTER_NR];
u64 __percpu *counters;
- unsigned btree_gc_periodic:1;
unsigned copy_gc_enabled:1;
bool promote_whole_extents;
@@ -1244,11 +1255,6 @@ static inline s64 bch2_current_time(const struct bch_fs *c)
return timespec_to_bch2_time(c, now);
}
-static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
-{
- return dev < c->sb.nr_devices && c->devs[dev];
-}
-
static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
{
struct stdio_redirect *stdio = c->stdio;
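
In debug builds the per-device ref becomes a bare atomic counter plus a dying flag and last-put timestamp, trading percpu performance for the ability to catch leaked or late references. A speculative sketch of how such a counter could assert on misuse; the helpers and checks are guesses for illustration, not the bcachefs implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <assert.h>

struct dev_ref_sketch {
	atomic_long ref;
	bool dying;
	unsigned long last_put;	/* when the last reference was dropped */
};

static void dev_get(struct dev_ref_sketch *d)
{
	long v = atomic_fetch_add(&d->ref, 1);

	/* taking the first ref on a dying device would be a bug */
	assert(v > 0 || !d->dying);
}

static void dev_put(struct dev_ref_sketch *d, unsigned long now)
{
	d->last_put = now;
	long v = atomic_fetch_sub(&d->ref, 1);

	assert(v > 0);		/* underflow here means a double put */
}

int main(void)
{
	struct dev_ref_sketch d = { .ref = 1 };

	dev_get(&d);
	dev_put(&d, 1);
	dev_put(&d, 2);		/* drops the initial ref */
	return 0;
}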
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index bff8750ac0d7..1bebba881d89 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -76,6 +76,7 @@
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
+#include <uapi/linux/magic.h>
#include "vstructs.h"
#ifdef __KERNEL__
@@ -578,7 +579,8 @@ struct bch_member {
__le64 nbuckets; /* device size */
__le16 first_bucket; /* index of first bucket used */
__le16 bucket_size; /* sectors */
- __le32 pad;
+ __u8 btree_bitmap_shift;
+ __u8 pad[3];
__le64 last_mount; /* time_t */
__le64 flags;
@@ -587,8 +589,22 @@ struct bch_member {
__le64 errors_at_reset[BCH_MEMBER_ERROR_NR];
__le64 errors_reset_time;
__le64 seq;
+ __le64 btree_allocated_bitmap;
+ /*
+ * On recovery from a clean shutdown we don't normally read the journal,
+ * but we still want to resume writing from where we left off so we
+ * don't overwrite more than is necessary, for list journal debugging:
+ */
+ __le32 last_journal_bucket;
+ __le32 last_journal_bucket_offset;
};
+/*
+ * This limit comes from the bucket_gens array - it's a single allocation, and
+ * kernel allocations are limited to INT_MAX
+ */
+#define BCH_MEMBER_NBUCKETS_MAX (INT_MAX - 64)
+
#define BCH_MEMBER_V1_BYTES 56
LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags, 0, 4)
@@ -818,6 +834,7 @@ struct bch_sb_field_ext {
struct bch_sb_field field;
__le64 recovery_passes_required[2];
__le64 errors_silent[8];
+ __le64 btrees_lost_data;
};
struct bch_sb_field_downgrade_entry {
@@ -875,7 +892,8 @@ struct bch_sb_field_downgrade {
x(rebalance_work, BCH_VERSION(1, 3)) \
x(member_seq, BCH_VERSION(1, 4)) \
x(subvolume_fs_parent, BCH_VERSION(1, 5)) \
- x(btree_subvolume_children, BCH_VERSION(1, 6))
+ x(btree_subvolume_children, BCH_VERSION(1, 6)) \
+ x(mi_btree_bitmap, BCH_VERSION(1, 7))
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
@@ -893,6 +911,8 @@ unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_re
#define BCH_SB_SECTOR 8
#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
+#define BCH_SB_LAYOUT_SIZE_BITS_MAX 16 /* 32 MB */
+
struct bch_sb_layout {
__uuid_t magic; /* bcachefs superblock UUID */
__u8 layout_type;
@@ -1271,7 +1291,7 @@ enum bch_compression_opts {
UUID_INIT(0xc68573f6, 0x66ce, 0x90a9, \
0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
-#define BCACHEFS_STATFS_MAGIC 0xca451a4e
+#define BCACHEFS_STATFS_MAGIC BCACHEFS_SUPER_MAGIC
#define JSET_MAGIC __cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC __cpu_to_le64(0x90135c78b99e07f5ULL)
@@ -1313,7 +1333,7 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
x(write_buffer_keys, 11) \
x(datetime, 12)
-enum {
+enum bch_jset_entry_type {
#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
BCH_JSET_ENTRY_TYPES()
#undef x
@@ -1359,7 +1379,7 @@ struct jset_entry_blacklist_v2 {
x(inodes, 1) \
x(key_version, 2)
-enum {
+enum bch_fs_usage_type {
#define x(f, nr) BCH_FS_USAGE_##f = nr,
BCH_FS_USAGE_TYPES()
#undef x
@@ -1500,7 +1520,8 @@ enum btree_id_flags {
BIT_ULL(KEY_TYPE_stripe)) \
x(reflink, 7, BTREE_ID_EXTENTS|BTREE_ID_DATA, \
BIT_ULL(KEY_TYPE_reflink_v)| \
- BIT_ULL(KEY_TYPE_indirect_inline_data)) \
+ BIT_ULL(KEY_TYPE_indirect_inline_data)| \
+ BIT_ULL(KEY_TYPE_error)) \
x(subvolumes, 8, 0, \
BIT_ULL(KEY_TYPE_subvolume)) \
x(snapshots, 9, 0, \
@@ -1534,6 +1555,20 @@ enum btree_id {
BTREE_ID_NR
};
+static inline bool btree_id_is_alloc(enum btree_id id)
+{
+ switch (id) {
+ case BTREE_ID_alloc:
+ case BTREE_ID_backpointers:
+ case BTREE_ID_need_discard:
+ case BTREE_ID_freespace:
+ case BTREE_ID_bucket_gens:
+ return true;
+ default:
+ return false;
+ }
+}
+
#define BTREE_MAX_DEPTH 4U
/* Btree nodes */
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index 76e79a15ba08..f46978e5cb7c 100644
--- a/fs/bcachefs/bkey.c
+++ b/fs/bcachefs/bkey.c
@@ -640,7 +640,7 @@ struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s)
int bch2_bkey_format_invalid(struct bch_fs *c,
struct bkey_format *f,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
unsigned i, bits = KEY_PACKED_BITS_START;
@@ -656,20 +656,17 @@ int bch2_bkey_format_invalid(struct bch_fs *c,
* unpacked format:
*/
for (i = 0; i < f->nr_fields; i++) {
- if (!c || c->sb.version_min >= bcachefs_metadata_version_snapshot) {
+ if ((!c || c->sb.version_min >= bcachefs_metadata_version_snapshot) &&
+ bch2_bkey_format_field_overflows(f, i)) {
unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
u64 packed_max = f->bits_per_field[i]
? ~((~0ULL << 1) << (f->bits_per_field[i] - 1))
: 0;
- u64 field_offset = le64_to_cpu(f->field_offset[i]);
- if (packed_max + field_offset < packed_max ||
- packed_max + field_offset > unpacked_max) {
- prt_printf(err, "field %u too large: %llu + %llu > %llu",
- i, packed_max, field_offset, unpacked_max);
- return -BCH_ERR_invalid;
- }
+ prt_printf(err, "field %u too large: %llu + %llu > %llu",
+ i, packed_max, le64_to_cpu(f->field_offset[i]), unpacked_max);
+ return -BCH_ERR_invalid;
}
bits += f->bits_per_field[i];
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index cf23ff47bed8..fcd43915df07 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -9,10 +9,10 @@
#include "util.h"
#include "vstructs.h"
-enum bkey_invalid_flags {
- BKEY_INVALID_WRITE = (1U << 0),
- BKEY_INVALID_COMMIT = (1U << 1),
- BKEY_INVALID_JOURNAL = (1U << 2),
+enum bch_validate_flags {
+ BCH_VALIDATE_write = (1U << 0),
+ BCH_VALIDATE_commit = (1U << 1),
+ BCH_VALIDATE_journal = (1U << 2),
};
#if 0
@@ -314,6 +314,12 @@ static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
return bkey_packed(k) ? format->key_u64s : BKEY_U64s;
}
+static inline bool bkeyp_u64s_valid(const struct bkey_format *f,
+ const struct bkey_packed *k)
+{
+ return ((unsigned) k->u64s - bkeyp_key_u64s(f, k) <= U8_MAX - BKEY_U64s);
+}
+
static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
const struct bkey_packed *k)
{
@@ -568,8 +574,31 @@ static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const s
void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
+
+static inline bool bch2_bkey_format_field_overflows(struct bkey_format *f, unsigned i)
+{
+ unsigned f_bits = f->bits_per_field[i];
+ unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
+ u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
+ u64 field_offset = le64_to_cpu(f->field_offset[i]);
+
+ if (f_bits > unpacked_bits)
+ return true;
+
+ if ((f_bits == unpacked_bits) && field_offset)
+ return true;
+
+ u64 f_mask = f_bits
+ ? ~((~0ULL << (f_bits - 1)) << 1)
+ : 0;
+
+ if (((field_offset + f_mask) & unpacked_mask) < field_offset)
+ return true;
+ return false;
+}
+
int bch2_bkey_format_invalid(struct bch_fs *, struct bkey_format *,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_bkey_format_to_text(struct printbuf *, const struct bkey_format *);
#endif /* _BCACHEFS_BKEY_H */
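
bch2_bkey_format_field_overflows() rejects a packed field whose representable range, once the field offset is added, escapes the unpacked field's range. The same rule in standalone form, with illustrative bit widths in the asserts:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool field_overflows(unsigned f_bits, unsigned unpacked_bits,
			    uint64_t field_offset)
{
	uint64_t unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));

	if (f_bits > unpacked_bits)
		return true;
	if (f_bits == unpacked_bits && field_offset)
		return true;

	uint64_t f_mask = f_bits ? ~((~0ULL << (f_bits - 1)) << 1) : 0;

	/* does offset + packed maximum wrap past the unpacked maximum? */
	return ((field_offset + f_mask) & unpacked_mask) < field_offset;
}

int main(void)
{
	/* 32-bit field at offset 0 inside a 64-bit unpacked field: fine */
	assert(!field_overflows(32, 64, 0));
	/* offset at the unpacked maximum: the packed range wraps */
	assert(field_overflows(32, 40, (1ULL << 40) - 1));
	return 0;
}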
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 5e52684764eb..c2c3dae52186 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -27,7 +27,7 @@ const char * const bch2_bkey_types[] = {
};
static int deleted_key_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags, struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
return 0;
}
@@ -41,7 +41,7 @@ static int deleted_key_invalid(struct bch_fs *c, struct bkey_s_c k,
})
static int empty_val_key_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags, struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
int ret = 0;
@@ -58,7 +58,7 @@ fsck_err:
})
static int key_type_cookie_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags, struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
return 0;
}
@@ -82,7 +82,7 @@ static void key_type_cookie_to_text(struct printbuf *out, struct bch_fs *c,
})
static int key_type_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags, struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
return 0;
}
@@ -123,9 +123,12 @@ const struct bkey_ops bch2_bkey_null_ops = {
};
int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
+ if (test_bit(BCH_FS_no_invalid_checks, &c->flags))
+ return 0;
+
const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
int ret = 0;
@@ -159,9 +162,12 @@ const char *bch2_btree_node_type_str(enum btree_node_type type)
int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
enum btree_node_type type,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
+ if (test_bit(BCH_FS_no_invalid_checks, &c->flags))
+ return 0;
+
int ret = 0;
bkey_fsck_err_on(k.k->u64s < BKEY_U64s, c, err,
@@ -171,11 +177,15 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
if (type >= BKEY_TYPE_NR)
return 0;
- bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) &&
+ bkey_fsck_err_on(k.k->type < KEY_TYPE_MAX &&
+ (type == BKEY_TYPE_btree || (flags & BCH_VALIDATE_commit)) &&
!(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, err,
bkey_invalid_type_for_btree,
"invalid key type for btree %s (%s)",
- bch2_btree_node_type_str(type), bch2_bkey_types[k.k->type]);
+ bch2_btree_node_type_str(type),
+ k.k->type < KEY_TYPE_MAX
+ ? bch2_bkey_types[k.k->type]
+ : "(unknown)");
if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
bkey_fsck_err_on(k.k->size == 0, c, err,
@@ -220,7 +230,7 @@ fsck_err:
int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
enum btree_node_type type,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
return __bch2_bkey_invalid(c, k, type, flags, err) ?:
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 03efe8ee565a..726ef7483763 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -22,14 +22,15 @@ extern const struct bkey_ops bch2_bkey_null_ops;
*/
struct bkey_ops {
int (*key_invalid)(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags, struct printbuf *err);
+ enum bch_validate_flags flags, struct printbuf *err);
void (*val_to_text)(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
void (*swab)(struct bkey_s);
bool (*key_normalize)(struct bch_fs *, struct bkey_s);
bool (*key_merge)(struct bch_fs *, struct bkey_s, struct bkey_s_c);
int (*trigger)(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
void (*compat)(enum btree_id id, unsigned version,
unsigned big_endian, int write,
struct bkey_s);
@@ -48,11 +49,11 @@ static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type)
}
int bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *,
struct bkey_s_c, struct printbuf *);
@@ -76,56 +77,10 @@ static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct b
bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
-enum btree_update_flags {
- __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
- __BTREE_UPDATE_NOJOURNAL,
- __BTREE_UPDATE_KEY_CACHE_RECLAIM,
-
- __BTREE_TRIGGER_NORUN,
- __BTREE_TRIGGER_TRANSACTIONAL,
- __BTREE_TRIGGER_ATOMIC,
- __BTREE_TRIGGER_GC,
- __BTREE_TRIGGER_INSERT,
- __BTREE_TRIGGER_OVERWRITE,
- __BTREE_TRIGGER_BUCKET_INVALIDATE,
-};
-
-#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
-#define BTREE_UPDATE_NOJOURNAL (1U << __BTREE_UPDATE_NOJOURNAL)
-#define BTREE_UPDATE_KEY_CACHE_RECLAIM (1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
-
-/* Don't run triggers at all */
-#define BTREE_TRIGGER_NORUN (1U << __BTREE_TRIGGER_NORUN)
-
-/*
- * If set, we're running transactional triggers as part of a transaction commit:
- * triggers may generate new updates
- *
- * If cleared, and either BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE are set,
- * we're running atomic triggers during a transaction commit: we have our
- * journal reservation, we're holding btree node write locks, and we know the
- * transaction is going to commit (returning an error here is a fatal error,
- * causing us to go emergency read-only)
- */
-#define BTREE_TRIGGER_TRANSACTIONAL (1U << __BTREE_TRIGGER_TRANSACTIONAL)
-#define BTREE_TRIGGER_ATOMIC (1U << __BTREE_TRIGGER_ATOMIC)
-
-/* We're in gc/fsck: running triggers to recalculate e.g. disk usage */
-#define BTREE_TRIGGER_GC (1U << __BTREE_TRIGGER_GC)
-
-/* @new is entering the btree */
-#define BTREE_TRIGGER_INSERT (1U << __BTREE_TRIGGER_INSERT)
-
-/* @old is leaving the btree */
-#define BTREE_TRIGGER_OVERWRITE (1U << __BTREE_TRIGGER_OVERWRITE)
-
-/* signal from bucket invalidate path to alloc trigger */
-#define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
-
static inline int bch2_key_trigger(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
const struct bkey_ops *ops = bch2_bkey_type_ops(old.k->type ?: new.k->type);
@@ -135,8 +90,9 @@ static inline int bch2_key_trigger(struct btree_trans *trans,
}
static inline int bch2_key_trigger_old(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c old, unsigned flags)
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s_c old,
+ enum btree_iter_update_trigger_flags flags)
{
struct bkey_i deleted;
@@ -144,12 +100,13 @@ static inline int bch2_key_trigger_old(struct btree_trans *trans,
deleted.k.p = old.k->p;
return bch2_key_trigger(trans, btree_id, level, old, bkey_i_to_s(&deleted),
- BTREE_TRIGGER_OVERWRITE|flags);
+ BTREE_TRIGGER_overwrite|flags);
}
static inline int bch2_key_trigger_new(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s new, unsigned flags)
+ enum btree_id btree_id, unsigned level,
+ struct bkey_s new,
+ enum btree_iter_update_trigger_flags flags)
{
struct bkey_i deleted;
@@ -157,7 +114,7 @@ static inline int bch2_key_trigger_new(struct btree_trans *trans,
deleted.k.p = new.k->p;
return bch2_key_trigger(trans, btree_id, level, bkey_i_to_s_c(&deleted), new,
- BTREE_TRIGGER_INSERT|flags);
+ BTREE_TRIGGER_insert|flags);
}
void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int);
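
bch2_key_trigger_old()/_new() reduce a single-key event to the two-key trigger by synthesizing a deleted key at the same position for the missing side. A toy version of that wrapping, with simplified types standing in for bkeys and trigger flags:

#include <stdio.h>

enum trig { TRIG_INSERT = 1, TRIG_OVERWRITE = 2 };

struct key { int pos; int deleted; };

static void trigger(struct key old, struct key new, enum trig f)
{
	printf("pos %d: %s\n", new.deleted ? old.pos : new.pos,
	       f & TRIG_INSERT ? "insert" : "overwrite");
}

static void trigger_old(struct key old)	/* key leaving the btree */
{
	struct key deleted = { .pos = old.pos, .deleted = 1 };

	trigger(old, deleted, TRIG_OVERWRITE);
}

static void trigger_new(struct key new)	/* key entering the btree */
{
	struct key deleted = { .pos = new.pos, .deleted = 1 };

	trigger(deleted, new, TRIG_INSERT);
}

int main(void)
{
	trigger_new((struct key){ .pos = 42 });
	trigger_old((struct key){ .pos = 42 });
	return 0;
}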
diff --git a/fs/bcachefs/bkey_sort.c b/fs/bcachefs/bkey_sort.c
index bcca9e76a0b4..4536eb50fc40 100644
--- a/fs/bcachefs/bkey_sort.c
+++ b/fs/bcachefs/bkey_sort.c
@@ -6,9 +6,9 @@
#include "bset.h"
#include "extents.h"
-typedef int (*sort_cmp_fn)(struct btree *,
- struct bkey_packed *,
- struct bkey_packed *);
+typedef int (*sort_cmp_fn)(const struct btree *,
+ const struct bkey_packed *,
+ const struct bkey_packed *);
static inline bool sort_iter_end(struct sort_iter *iter)
{
@@ -70,9 +70,9 @@ static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
/*
* If keys compare equal, compare by pointer order:
*/
-static inline int key_sort_fix_overlapping_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
+static inline int key_sort_fix_overlapping_cmp(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bkey_packed *r)
{
return bch2_bkey_cmp_packed(b, l, r) ?:
cmp_int((unsigned long) l, (unsigned long) r);
@@ -154,46 +154,59 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
return nr;
}
-static inline int sort_keys_cmp(struct btree *b,
- struct bkey_packed *l,
- struct bkey_packed *r)
+static inline int keep_unwritten_whiteouts_cmp(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bkey_packed *r)
{
return bch2_bkey_cmp_packed_inlined(b, l, r) ?:
(int) bkey_deleted(r) - (int) bkey_deleted(l) ?:
- (int) l->needs_whiteout - (int) r->needs_whiteout;
+ (long) l - (long) r;
}
-unsigned bch2_sort_keys(struct bkey_packed *dst,
- struct sort_iter *iter,
- bool filter_whiteouts)
+#include "btree_update_interior.h"
+
+/*
+ * For sorting in the btree node write path: whiteouts not in the unwritten
+ * whiteouts area are dropped, whiteouts in the unwritten whiteouts area are
+ * dropped if overwritten by real keys:
+ */
+unsigned bch2_sort_keys_keep_unwritten_whiteouts(struct bkey_packed *dst, struct sort_iter *iter)
{
- const struct bkey_format *f = &iter->b->format;
struct bkey_packed *in, *next, *out = dst;
- sort_iter_sort(iter, sort_keys_cmp);
+ sort_iter_sort(iter, keep_unwritten_whiteouts_cmp);
- while ((in = sort_iter_next(iter, sort_keys_cmp))) {
- bool needs_whiteout = false;
+ while ((in = sort_iter_next(iter, keep_unwritten_whiteouts_cmp))) {
+ if (bkey_deleted(in) && in < unwritten_whiteouts_start(iter->b))
+ continue;
- if (bkey_deleted(in) &&
- (filter_whiteouts || !in->needs_whiteout))
+ if ((next = sort_iter_peek(iter)) &&
+ !bch2_bkey_cmp_packed_inlined(iter->b, in, next))
continue;
- while ((next = sort_iter_peek(iter)) &&
- !bch2_bkey_cmp_packed_inlined(iter->b, in, next)) {
- BUG_ON(in->needs_whiteout &&
- next->needs_whiteout);
- needs_whiteout |= in->needs_whiteout;
- in = sort_iter_next(iter, sort_keys_cmp);
- }
+ bkey_p_copy(out, in);
+ out = bkey_p_next(out);
+ }
- if (bkey_deleted(in)) {
- memcpy_u64s_small(out, in, bkeyp_key_u64s(f, in));
- set_bkeyp_val_u64s(f, out, 0);
- } else {
- bkey_p_copy(out, in);
- }
- out->needs_whiteout |= needs_whiteout;
+ return (u64 *) out - (u64 *) dst;
+}
+
+/*
+ * Main sort routine for compacting a btree node in memory: we always drop
+ * whiteouts because any whiteouts that need to be written are in the unwritten
+ * whiteouts area:
+ */
+unsigned bch2_sort_keys(struct bkey_packed *dst, struct sort_iter *iter)
+{
+ struct bkey_packed *in, *out = dst;
+
+ sort_iter_sort(iter, bch2_bkey_cmp_packed_inlined);
+
+ while ((in = sort_iter_next(iter, bch2_bkey_cmp_packed_inlined))) {
+ if (bkey_deleted(in))
+ continue;
+
+ bkey_p_copy(out, in);
out = bkey_p_next(out);
}
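
Both sort routines above rely on the same merge-time dedup rule: among keys that compare equal, earlier entries are skipped so the last one (the winner under the pointer-order tiebreak) is emitted. The rule in isolation, over plain ints:

#include <stdio.h>

static unsigned dedup_keep_last(const int *in, unsigned n, int *out)
{
	unsigned o = 0;

	for (unsigned i = 0; i < n; i++) {
		if (i + 1 < n && in[i] == in[i + 1])
			continue;	/* a newer version follows: drop this one */
		out[o++] = in[i];
	}
	return o;
}

int main(void)
{
	int in[] = { 1, 2, 2, 3, 3, 3, 4 }, out[7];
	unsigned n = dedup_keep_last(in, 7, out);

	for (unsigned i = 0; i < n; i++)
		printf("%d ", out[i]);	/* prints: 1 2 3 4 */
	printf("\n");
	return 0;
}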
diff --git a/fs/bcachefs/bkey_sort.h b/fs/bcachefs/bkey_sort.h
index 7c0f0b160f18..9be969d46890 100644
--- a/fs/bcachefs/bkey_sort.h
+++ b/fs/bcachefs/bkey_sort.h
@@ -48,7 +48,7 @@ bch2_sort_repack(struct bset *, struct btree *,
struct btree_node_iter *,
struct bkey_format *, bool);
-unsigned bch2_sort_keys(struct bkey_packed *,
- struct sort_iter *, bool);
+unsigned bch2_sort_keys_keep_unwritten_whiteouts(struct bkey_packed *, struct sort_iter *);
+unsigned bch2_sort_keys(struct bkey_packed *, struct sort_iter *);
#endif /* _BCACHEFS_BKEY_SORT_H */
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 3fd1085b6c61..575e1d0b6eeb 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -103,8 +103,6 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
- struct bset_tree *t;
-
console_lock();
for_each_bset(b, t)
bch2_dump_bset(c, b, bset(b, t), t - b->set);
@@ -134,18 +132,23 @@ void bch2_dump_btree_node_iter(struct btree *b,
printbuf_exit(&buf);
}
-#ifdef CONFIG_BCACHEFS_DEBUG
-
-void __bch2_verify_btree_nr_keys(struct btree *b)
+struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
{
- struct bset_tree *t;
struct bkey_packed *k;
- struct btree_nr_keys nr = { 0 };
+ struct btree_nr_keys nr = {};
for_each_bset(b, t)
bset_tree_for_each_key(b, t, k)
if (!bkey_deleted(k))
btree_keys_account_key_add(&nr, t - b->set, k);
+ return nr;
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+
+void __bch2_verify_btree_nr_keys(struct btree *b)
+{
+ struct btree_nr_keys nr = bch2_btree_node_count_keys(b);
BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}
@@ -192,7 +195,6 @@ void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
{
struct btree_node_iter_set *set, *s2;
struct bkey_packed *k, *p;
- struct bset_tree *t;
if (bch2_btree_node_iter_end(iter))
return;
@@ -207,12 +209,14 @@ void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
/* Verify that set->end is correct: */
btree_node_iter_for_each(iter, set) {
for_each_bset(b, t)
- if (set->end == t->end_offset)
+ if (set->end == t->end_offset) {
+ BUG_ON(set->k < btree_bkey_first_offset(t) ||
+ set->k >= t->end_offset);
goto found;
+ }
BUG();
found:
- BUG_ON(set->k < btree_bkey_first_offset(t) ||
- set->k >= t->end_offset);
+ do {} while (0);
}
/* Verify iterator is sorted: */
@@ -371,11 +375,9 @@ static struct bkey_float *bkey_float(const struct btree *b,
return ro_aux_tree_base(b, t)->f + idx;
}
-static void bset_aux_tree_verify(const struct btree *b)
+static void bset_aux_tree_verify(struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
- const struct bset_tree *t;
-
for_each_bset(b, t) {
if (t->aux_data_offset == U16_MAX)
continue;
@@ -679,20 +681,20 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
}
/* bytes remaining - only valid for last bset: */
-static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
+static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
{
bset_aux_tree_verify(b);
return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
}
-static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
+static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
{
return __bset_tree_capacity(b, t) /
(sizeof(struct bkey_float) + sizeof(u8));
}
-static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
+static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
{
return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
}
@@ -1368,8 +1370,6 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
struct btree *b)
{
- struct bset_tree *t;
-
memset(iter, 0, sizeof(*iter));
for_each_bset(b, t)
@@ -1475,7 +1475,6 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
{
struct bkey_packed *k, *prev = NULL;
struct btree_node_iter_set *set;
- struct bset_tree *t;
unsigned end = 0;
if (bch2_expensive_debug_checks)
@@ -1544,9 +1543,7 @@ struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
{
- const struct bset_tree *t;
-
- for_each_bset(b, t) {
+ for_each_bset_c(b, t) {
enum bset_aux_tree_type type = bset_aux_tree_type(t);
size_t j;
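
Extracting bch2_btree_node_count_keys() lets the debug verify recompute accounting from scratch and memcmp it against the cached totals. The same pattern reduced to a single counter, with stand-in types:

#include <assert.h>
#include <string.h>

struct nr_keys_sketch { unsigned live; };

struct node_sketch {
	int keys[8];
	unsigned nr_total;
	struct nr_keys_sketch nr;	/* cached accounting */
};

static struct nr_keys_sketch count_keys(const struct node_sketch *b)
{
	struct nr_keys_sketch nr = { 0 };

	for (unsigned i = 0; i < b->nr_total; i++)
		if (b->keys[i])		/* 0 stands in for a deleted key */
			nr.live++;
	return nr;
}

static void verify_nr_keys(const struct node_sketch *b)
{
	struct nr_keys_sketch nr = count_keys(b);

	assert(!memcmp(&nr, &b->nr, sizeof(nr)));
}

int main(void)
{
	struct node_sketch b = { .keys = { 1, 0, 2 }, .nr_total = 3, .nr = { 2 } };

	verify_nr_keys(&b);
	return 0;
}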
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index 79c77baaa383..5c6c7a14fa0f 100644
--- a/fs/bcachefs/bset.h
+++ b/fs/bcachefs/bset.h
@@ -206,7 +206,10 @@ static inline size_t btree_aux_data_u64s(const struct btree *b)
}
#define for_each_bset(_b, _t) \
- for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
+ for (struct bset_tree *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
+
+#define for_each_bset_c(_b, _t) \
+ for (const struct bset_tree *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
#define bset_tree_for_each_key(_b, _t, _k) \
for (_k = btree_bkey_first(_b, _t); \
@@ -294,7 +297,6 @@ static inline struct bset_tree *
bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
{
unsigned offset = __btree_node_key_to_offset(b, k);
- struct bset_tree *t;
for_each_bset(b, t)
if (offset <= t->end_offset) {
@@ -458,6 +460,8 @@ struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
/* Accounting: */
+struct btree_nr_keys bch2_btree_node_count_keys(struct btree *);
+
static inline void btree_keys_account_key(struct btree_nr_keys *n,
unsigned bset,
struct bkey_packed *k,
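
for_each_bset() now declares its iterator inside the for-statement, which is why the hunks above delete so many struct bset_tree *t locals. A minimal demonstration of that macro style with stand-in types:

#include <stdio.h>

struct bset_sketch { int v; };

struct node_sketch {
	struct bset_sketch set[4];
	unsigned nsets;
};

#define for_each_set(_b, _t) \
	for (struct bset_sketch *_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)

int main(void)
{
	struct node_sketch b = { .set = { { 1 }, { 2 }, { 3 } }, .nsets = 3 };

	for_each_set(&b, t)	/* no local declaration needed at the call site */
		printf("%d\n", t->v);
	return 0;
}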
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 562561a9a510..9e4ed75d3675 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -16,6 +16,12 @@
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
+#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
+do { \
+ if (shrinker_counter) \
+ bc->not_freed_##counter++; \
+} while (0)
+
const char * const bch2_btree_node_flags[] = {
#define x(f) #f,
BTREE_FLAGS()
@@ -162,6 +168,9 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
/* Cause future lookups for this node to fail: */
b->hash_val = 0;
+
+ if (b->c.btree_id < BTREE_ID_NR)
+ --bc->used_by_btree[b->c.btree_id];
}
int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -169,8 +178,11 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
BUG_ON(b->hash_val);
b->hash_val = btree_ptr_hash_val(&b->key);
- return rhashtable_lookup_insert_fast(&bc->table, &b->hash,
- bch_btree_cache_params);
+ int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
+ bch_btree_cache_params);
+ if (!ret && b->c.btree_id < BTREE_ID_NR)
+ bc->used_by_btree[b->c.btree_id]++;
+ return ret;
}
int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
@@ -190,6 +202,35 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
return ret;
}
+void bch2_btree_node_update_key_early(struct btree_trans *trans,
+ enum btree_id btree, unsigned level,
+ struct bkey_s_c old, struct bkey_i *new)
+{
+ struct bch_fs *c = trans->c;
+ struct btree *b;
+ struct bkey_buf tmp;
+ int ret;
+
+ bch2_bkey_buf_init(&tmp);
+ bch2_bkey_buf_reassemble(&tmp, c, old);
+
+ b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true);
+ if (!IS_ERR_OR_NULL(b)) {
+ mutex_lock(&c->btree_cache.lock);
+
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+ bkey_copy(&b->key, new);
+ ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
+ BUG_ON(ret);
+
+ mutex_unlock(&c->btree_cache.lock);
+ six_unlock_read(&b->c.lock);
+ }
+
+ bch2_bkey_buf_exit(&tmp, c);
+}
+
__flatten
static inline struct btree *btree_cache_find(struct btree_cache *bc,
const struct bkey_i *k)
@@ -203,7 +244,7 @@ static inline struct btree *btree_cache_find(struct btree_cache *bc,
* this version is for btree nodes that have already been freed (we're not
* reaping a real btree node)
*/
-static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
+static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter)
{
struct btree_cache *bc = &c->btree_cache;
int ret = 0;
@@ -225,38 +266,64 @@ wait_on_io:
if (b->flags & ((1U << BTREE_NODE_dirty)|
(1U << BTREE_NODE_read_in_flight)|
(1U << BTREE_NODE_write_in_flight))) {
- if (!flush)
+ if (!flush) {
+ if (btree_node_dirty(b))
+ BTREE_CACHE_NOT_FREED_INCREMENT(dirty);
+ else if (btree_node_read_in_flight(b))
+ BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
+ else if (btree_node_write_in_flight(b))
+ BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
return -BCH_ERR_ENOMEM_btree_node_reclaim;
+ }
/* XXX: waiting on IO with btree cache lock held */
bch2_btree_node_wait_on_read(b);
bch2_btree_node_wait_on_write(b);
}
- if (!six_trylock_intent(&b->c.lock))
+ if (!six_trylock_intent(&b->c.lock)) {
+ BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
return -BCH_ERR_ENOMEM_btree_node_reclaim;
+ }
- if (!six_trylock_write(&b->c.lock))
+ if (!six_trylock_write(&b->c.lock)) {
+ BTREE_CACHE_NOT_FREED_INCREMENT(lock_write);
goto out_unlock_intent;
+ }
/* recheck under lock */
if (b->flags & ((1U << BTREE_NODE_read_in_flight)|
(1U << BTREE_NODE_write_in_flight))) {
- if (!flush)
+ if (!flush) {
+ if (btree_node_read_in_flight(b))
+ BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
+ else if (btree_node_write_in_flight(b))
+ BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
goto out_unlock;
+ }
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
goto wait_on_io;
}
- if (btree_node_noevict(b) ||
- btree_node_write_blocked(b) ||
- btree_node_will_make_reachable(b))
+ if (btree_node_noevict(b)) {
+ BTREE_CACHE_NOT_FREED_INCREMENT(noevict);
+ goto out_unlock;
+ }
+ if (btree_node_write_blocked(b)) {
+ BTREE_CACHE_NOT_FREED_INCREMENT(write_blocked);
goto out_unlock;
+ }
+ if (btree_node_will_make_reachable(b)) {
+ BTREE_CACHE_NOT_FREED_INCREMENT(will_make_reachable);
+ goto out_unlock;
+ }
if (btree_node_dirty(b)) {
- if (!flush)
+ if (!flush) {
+ BTREE_CACHE_NOT_FREED_INCREMENT(dirty);
goto out_unlock;
+ }
/*
* Using the underscore version because we don't want to compact
* bsets after the write, since this node is about to be evicted
@@ -286,14 +353,14 @@ out_unlock_intent:
goto out;
}
-static int btree_node_reclaim(struct bch_fs *c, struct btree *b)
+static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter)
{
- return __btree_node_reclaim(c, b, false);
+ return __btree_node_reclaim(c, b, false, shrinker_counter);
}
static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
{
- return __btree_node_reclaim(c, b, true);
+ return __btree_node_reclaim(c, b, true, false);
}
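BTREE_CACHE_NOT_FREED_INCREMENT() is defined earlier in btree_cache.c and is not visible in this hunk; presumably it pastes the reason onto the new not_freed_* counters in struct btree_cache and only counts when the caller is the shrinker, along these lines (a sketch, not the verbatim definition):

	#define BTREE_CACHE_NOT_FREED_INCREMENT(counter)	\
	do {							\
		if (shrinker_counter)				\
			bc->not_freed_##counter++;		\
	} while (0)

This keeps the counters from being polluted by reclaim attempts on the allocation path (cannibalize and mem_alloc), which pass shrinker_counter = false below.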
static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
@@ -341,11 +408,12 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
if (touched >= nr)
goto out;
- if (!btree_node_reclaim(c, b)) {
+ if (!btree_node_reclaim(c, b, true)) {
btree_node_data_free(c, b);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
freed++;
+ bc->freed++;
}
}
restart:
@@ -354,9 +422,11 @@ restart:
if (btree_node_accessed(b)) {
clear_btree_node_accessed(b);
- } else if (!btree_node_reclaim(c, b)) {
+ bc->not_freed_access_bit++;
+ } else if (!btree_node_reclaim(c, b, true)) {
freed++;
btree_node_data_free(c, b);
+ bc->freed++;
bch2_btree_node_hash_remove(bc, b);
six_unlock_write(&b->c.lock);
@@ -564,7 +634,7 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
struct btree *b;
list_for_each_entry_reverse(b, &bc->live, list)
- if (!btree_node_reclaim(c, b))
+ if (!btree_node_reclaim(c, b, false))
return b;
while (1) {
@@ -600,7 +670,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
* disk node. Check the freed list before allocating a new one:
*/
list_for_each_entry(b, freed, list)
- if (!btree_node_reclaim(c, b)) {
+ if (!btree_node_reclaim(c, b, false)) {
list_del_init(&b->list);
goto got_node;
}
@@ -626,7 +696,7 @@ got_node:
* the list. Check if there's any freed nodes there:
*/
list_for_each_entry(b2, &bc->freeable, list)
- if (!btree_node_reclaim(c, b2)) {
+ if (!btree_node_reclaim(c, b2, false)) {
swap(b->data, b2->data);
swap(b->aux_data, b2->aux_data);
btree_node_to_freedlist(bc, b2);
@@ -709,9 +779,31 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- u32 seq;
- BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
+ if (unlikely(level >= BTREE_MAX_DEPTH)) {
+ int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
+ level, BTREE_MAX_DEPTH);
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(!bkey_is_btree_ptr(&k->k))) {
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+
+ int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf);
+ printbuf_exit(&buf);
+ return ERR_PTR(ret);
+ }
+
+ if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) {
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+
+ int ret = bch2_fs_topology_error(c, "attempting to get btree node with too big key %s", buf.buf);
+ printbuf_exit(&buf);
+ return ERR_PTR(ret);
+ }
+
/*
* Parent node must be locked, else we could read in a btree node that's
* been freed:
@@ -752,34 +844,26 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
}
set_btree_node_read_in_flight(b);
-
six_unlock_write(&b->c.lock);
- seq = six_lock_seq(&b->c.lock);
- six_unlock_intent(&b->c.lock);
- /* Unlock before doing IO: */
- if (path && sync)
- bch2_trans_unlock_noassert(trans);
-
- bch2_btree_node_read(trans, b, sync);
+ if (path) {
+ u32 seq = six_lock_seq(&b->c.lock);
- if (!sync)
- return NULL;
+ /* Unlock before doing IO: */
+ six_unlock_intent(&b->c.lock);
+ bch2_trans_unlock_noassert(trans);
- if (path) {
- int ret = bch2_trans_relock(trans) ?:
- bch2_btree_path_relock_intent(trans, path);
- if (ret) {
- BUG_ON(!trans->restarted);
- return ERR_PTR(ret);
- }
- }
+ bch2_btree_node_read(trans, b, sync);
- if (!six_relock_type(&b->c.lock, lock_type, seq)) {
- BUG_ON(!path);
+ if (!sync)
+ return NULL;
- trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
- return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
+ if (!six_relock_type(&b->c.lock, lock_type, seq))
+ b = NULL;
+ } else {
+ bch2_btree_node_read(trans, b, sync);
+ if (lock_type == SIX_LOCK_read)
+ six_lock_downgrade(&b->c.lock);
}
return b;
@@ -808,7 +892,8 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
prt_printf(&buf, "\nmax ");
bch2_bpos_to_text(&buf, b->data->max_key);
- bch2_fs_inconsistent(c, "%s", buf.buf);
+ bch2_fs_topology_error(c, "%s", buf.buf);
+
printbuf_exit(&buf);
}
@@ -831,7 +916,6 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- struct bset_tree *t;
bool need_relock = false;
int ret;
@@ -951,7 +1035,6 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
{
struct bch_fs *c = trans->c;
struct btree *b;
- struct bset_tree *t;
int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -1028,7 +1111,6 @@ struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- struct bset_tree *t;
int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -1111,18 +1193,19 @@ int bch2_btree_node_prefetch(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache;
- struct btree *b;
BUG_ON(path && !btree_node_locked(path, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH);
- b = btree_cache_find(bc, k);
+ struct btree *b = btree_cache_find(bc, k);
if (b)
return 0;
b = bch2_btree_node_fill(trans, path, k, btree_id,
level, SIX_LOCK_read, false);
- return PTR_ERR_OR_ZERO(b);
+ if (!IS_ERR_OR_NULL(b))
+ six_unlock_read(&b->c.lock);
+ return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b);
}
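With this change, prefetch takes the read lock via bch2_btree_node_fill() and immediately drops it, then re-locks the transaction before returning, so a restart is reported here rather than surfacing at the caller's next btree operation. A hedged caller sketch (the caller shown is an assumption, not part of this patch):

	int ret = bch2_btree_node_prefetch(trans, path, k, btree_id, level);
	if (ret)	/* may be a transaction restart from bch2_trans_relock() */
		return ret;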
void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
@@ -1134,6 +1217,8 @@ void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
b = btree_cache_find(bc, k);
if (!b)
return;
+
+ BUG_ON(b == btree_node_root(trans->c, b));
wait_on_io:
/* not allowed to wait on io with btree locks held: */
@@ -1145,6 +1230,8 @@ wait_on_io:
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
+ if (unlikely(b->hash_val != btree_ptr_hash_val(k)))
+ goto out;
if (btree_node_dirty(b)) {
__bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
@@ -1159,7 +1246,7 @@ wait_on_io:
btree_node_data_free(c, b);
bch2_btree_node_hash_remove(bc, b);
mutex_unlock(&bc->lock);
-
+out:
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
}
@@ -1220,9 +1307,39 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
stats.failed);
}
-void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
+static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
+ const char *label, unsigned nr)
{
- prt_printf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
- prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
- prt_printf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
+ prt_printf(out, "%s\t", label);
+ prt_human_readable_u64(out, nr * c->opts.btree_node_size);
+ prt_printf(out, " (%u)\n", nr);
+}
+
+void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
+{
+ struct bch_fs *c = container_of(bc, struct bch_fs, btree_cache);
+
+ if (!out->nr_tabstops)
+ printbuf_tabstop_push(out, 32);
+
+ prt_btree_cache_line(out, c, "total:", bc->used);
+ prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
+ prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
+ prt_newline(out);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
+ prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
+
+ prt_newline(out);
+ prt_printf(out, "freed:\t%u\n", bc->freed);
+ prt_printf(out, "not freed:\n");
+ prt_printf(out, " dirty\t%u\n", bc->not_freed_dirty);
+ prt_printf(out, " write in flight\t%u\n", bc->not_freed_write_in_flight);
+ prt_printf(out, " read in flight\t%u\n", bc->not_freed_read_in_flight);
+ prt_printf(out, " lock intent failed\t%u\n", bc->not_freed_lock_intent);
+ prt_printf(out, " lock write failed\t%u\n", bc->not_freed_lock_write);
+ prt_printf(out, " access bit\t%u\n", bc->not_freed_access_bit);
+ prt_printf(out, " no evict failed\t%u\n", bc->not_freed_noevict);
+ prt_printf(out, " write blocked\t%u\n", bc->not_freed_write_blocked);
+ prt_printf(out, " will make reachable\t%u\n", bc->not_freed_will_make_reachable);
}
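With the signature change, callers now pass the btree_cache directly; the sysfs/debug caller would presumably become:

	bch2_btree_cache_to_text(out, &c->btree_cache);

The 32-column tabstop pushed above makes the '\t' separators line up the human-readable sizes with their raw counts.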
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 6d33885fdbde..fed35de3e4de 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -17,6 +17,9 @@ int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
unsigned, enum btree_id);
+void bch2_btree_node_update_key_early(struct btree_trans *, enum btree_id, unsigned,
+ struct bkey_s_c, struct bkey_i *);
+
void bch2_btree_cache_cannibalize_unlock(struct btree_trans *);
int bch2_btree_cache_cannibalize_lock(struct btree_trans *, struct closure *);
@@ -131,6 +134,6 @@ static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
const char *bch2_btree_id_str(enum btree_id);
void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
-void bch2_btree_cache_to_text(struct printbuf *, const struct bch_fs *);
+void bch2_btree_cache_to_text(struct printbuf *, const struct btree_cache *);
#endif /* _BCACHEFS_BTREE_CACHE_H */
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index bdaed29f084a..8035c8b797ab 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -7,11 +7,13 @@
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
+#include "backpointers.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
+#include "btree_node_scan.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
@@ -24,7 +26,7 @@
#include "journal.h"
#include "keylist.h"
#include "move.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "reflink.h"
#include "replicas.h"
#include "super-io.h"
@@ -40,6 +42,7 @@
#define DROP_THIS_NODE 10
#define DROP_PREV_NODE 11
+#define DID_FILL_FROM_SCAN 12
static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
{
@@ -49,12 +52,6 @@ static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
}}};
}
-static bool should_restart_for_topology_repair(struct bch_fs *c)
-{
- return c->opts.fix_errors != FSCK_FIX_no &&
- !(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
-}
-
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
preempt_disable();
@@ -66,94 +63,10 @@ static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
- BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
+ BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) < 0);
__gc_pos_set(c, new_pos);
}
-/*
- * Missing: if an interior btree node is empty, we need to do something -
- * perhaps just kill it
- */
-static int bch2_gc_check_topology(struct bch_fs *c,
- struct btree *b,
- struct bkey_buf *prev,
- struct bkey_buf cur,
- bool is_last)
-{
- struct bpos node_start = b->data->min_key;
- struct bpos node_end = b->data->max_key;
- struct bpos expected_start = bkey_deleted(&prev->k->k)
- ? node_start
- : bpos_successor(prev->k->k.p);
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
- int ret = 0;
-
- if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
- struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
-
- if (!bpos_eq(expected_start, bp->v.min_key)) {
- bch2_topology_error(c);
-
- if (bkey_deleted(&prev->k->k)) {
- prt_printf(&buf1, "start of node: ");
- bch2_bpos_to_text(&buf1, node_start);
- } else {
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
- }
- bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
-
- if (__fsck_err(c,
- FSCK_CAN_FIX|
- FSCK_CAN_IGNORE|
- FSCK_NO_RATELIMIT,
- btree_node_topology_bad_min_key,
- "btree node with incorrect min_key at btree %s level %u:\n"
- " prev %s\n"
- " cur %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) {
- bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
- goto err;
- } else {
- set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
- }
- }
- }
-
- if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
- bch2_topology_error(c);
-
- printbuf_reset(&buf1);
- printbuf_reset(&buf2);
-
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
- bch2_bpos_to_text(&buf2, node_end);
-
- if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT,
- btree_node_topology_bad_max_key,
- "btree node with incorrect max_key at btree %s level %u:\n"
- " %s\n"
- " expected %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf) &&
- should_restart_for_topology_repair(c)) {
- bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
- goto err;
- } else {
- set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
- }
- }
-
- bch2_bkey_buf_copy(prev, c, cur.k);
-err:
-fsck_err:
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
- return ret;
-}
-
static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
switch (b->key.k.type) {
@@ -178,40 +91,22 @@ static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
}
}
-static void bch2_btree_node_update_key_early(struct btree_trans *trans,
- enum btree_id btree, unsigned level,
- struct bkey_s_c old, struct bkey_i *new)
+static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
{
- struct bch_fs *c = trans->c;
- struct btree *b;
- struct bkey_buf tmp;
+ struct bkey_i_btree_ptr_v2 *new;
int ret;
- bch2_bkey_buf_init(&tmp);
- bch2_bkey_buf_reassemble(&tmp, c, old);
-
- b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true);
- if (!IS_ERR_OR_NULL(b)) {
- mutex_lock(&c->btree_cache.lock);
+ if (c->opts.verbose) {
+ struct printbuf buf = PRINTBUF;
- bch2_btree_node_hash_remove(&c->btree_cache, b);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ prt_str(&buf, " -> ");
+ bch2_bpos_to_text(&buf, new_min);
- bkey_copy(&b->key, new);
- ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
- BUG_ON(ret);
-
- mutex_unlock(&c->btree_cache.lock);
- six_unlock_read(&b->c.lock);
+ bch_info(c, "%s(): %s", __func__, buf.buf);
+ printbuf_exit(&buf);
}
- bch2_bkey_buf_exit(&tmp, c);
-}
-
-static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
-{
- struct bkey_i_btree_ptr_v2 *new;
- int ret;
-
new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
if (!new)
return -BCH_ERR_ENOMEM_gc_repair_key;
@@ -237,6 +132,17 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
struct bkey_i_btree_ptr_v2 *new;
int ret;
+ if (c->opts.verbose) {
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ prt_str(&buf, " -> ");
+ bch2_bpos_to_text(&buf, new_max);
+
+ bch_info(c, "%s(): %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+ }
+
ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
if (ret)
return ret;
@@ -268,127 +174,138 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
return 0;
}
-static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
- struct btree *prev, struct btree *cur)
+static int btree_check_node_boundaries(struct bch_fs *c, struct btree *b,
+ struct btree *prev, struct btree *cur,
+ struct bpos *pulled_from_scan)
{
struct bpos expected_start = !prev
? b->data->min_key
: bpos_successor(prev->key.k.p);
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
- if (!prev) {
- prt_printf(&buf1, "start of node: ");
- bch2_bpos_to_text(&buf1, b->data->min_key);
- } else {
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&prev->key));
+ BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
+ b->data->min_key));
+
+ if (bpos_eq(expected_start, cur->data->min_key))
+ return 0;
+
+ prt_printf(&buf, " at btree %s level %u:\n parent: ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ if (prev) {
+ prt_printf(&buf, "\n prev: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&prev->key));
}
- bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
-
- if (prev &&
- bpos_gt(expected_start, cur->data->min_key) &&
- BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
- /* cur overwrites prev: */
-
- if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
- cur->data->min_key), c,
- btree_node_topology_overwritten_by_next_node,
- "btree node overwritten by next node at btree %s level %u:\n"
- " node %s\n"
- " next %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf)) {
- ret = DROP_PREV_NODE;
- goto out;
- }
+ prt_str(&buf, "\n next: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));
- if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
- bpos_predecessor(cur->data->min_key)), c,
- btree_node_topology_bad_max_key,
- "btree node with incorrect max_key at btree %s level %u:\n"
- " node %s\n"
- " next %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf))
- ret = set_node_max(c, prev,
- bpos_predecessor(cur->data->min_key));
- } else {
- /* prev overwrites cur: */
-
- if (mustfix_fsck_err_on(bpos_ge(expected_start,
- cur->data->max_key), c,
- btree_node_topology_overwritten_by_prev_node,
- "btree node overwritten by prev node at btree %s level %u:\n"
- " prev %s\n"
- " node %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf)) {
- ret = DROP_THIS_NODE;
- goto out;
- }
+ if (bpos_lt(expected_start, cur->data->min_key)) { /* gap */
+ if (b->c.level == 1 &&
+ bpos_lt(*pulled_from_scan, cur->data->min_key)) {
+ ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
+ expected_start,
+ bpos_predecessor(cur->data->min_key));
+ if (ret)
+ goto err;
- if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
- btree_node_topology_bad_min_key,
- "btree node with incorrect min_key at btree %s level %u:\n"
- " prev %s\n"
- " node %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf))
- ret = set_node_min(c, cur, expected_start);
+ *pulled_from_scan = cur->data->min_key;
+ ret = DID_FILL_FROM_SCAN;
+ } else {
+ if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
+ "btree node with incorrect min_key%s", buf.buf))
+ ret = set_node_min(c, cur, expected_start);
+ }
+ } else { /* overlap */
+ if (prev && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { /* cur overwrites prev */
+ if (bpos_ge(prev->data->min_key, cur->data->min_key)) { /* fully? */
+ if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_next_node,
+ "btree node overwritten by next node%s", buf.buf))
+ ret = DROP_PREV_NODE;
+ } else {
+ if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
+ "btree node with incorrect max_key%s", buf.buf))
+ ret = set_node_max(c, prev,
+ bpos_predecessor(cur->data->min_key));
+ }
+ } else {
+ if (bpos_ge(expected_start, cur->data->max_key)) { /* fully? */
+ if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_prev_node,
+ "btree node overwritten by prev node%s", buf.buf))
+ ret = DROP_THIS_NODE;
+ } else {
+ if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
+ "btree node with incorrect min_key%s", buf.buf))
+ ret = set_node_min(c, cur, expected_start);
+ }
+ }
}
-out:
+err:
fsck_err:
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
+ printbuf_exit(&buf);
return ret;
}
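The gap-versus-overlap logic above is compact; here is a standalone sketch of the same decision tree with toy types (bpos collapsed to a u64, BTREE_NODE_SEQ to a plain integer). These are assumptions for illustration, not bcachefs code:

	#include <stdbool.h>
	#include <stdint.h>

	enum fix { FIX_NONE, FIX_SET_MIN, FIX_SET_MAX, FIX_DROP_PREV,
		   FIX_DROP_THIS, FIX_FILL_FROM_SCAN };

	/* expected_start = prev ? prev->key.k.p + 1 : parent's min_key */
	static enum fix boundary_fix(uint64_t expected_start,
				     uint64_t cur_min, uint64_t cur_max,
				     bool have_prev, uint64_t prev_min,
				     uint64_t prev_seq, uint64_t cur_seq,
				     bool can_fill_from_scan)
	{
		if (expected_start == cur_min)		/* boundaries agree */
			return FIX_NONE;
		if (expected_start < cur_min)		/* gap between nodes */
			return can_fill_from_scan ? FIX_FILL_FROM_SCAN : FIX_SET_MIN;
		if (have_prev && cur_seq > prev_seq)	/* overlap: cur is newer */
			return prev_min >= cur_min ? FIX_DROP_PREV : FIX_SET_MAX;
		/* overlap: prev is newer */
		return expected_start >= cur_max ? FIX_DROP_THIS : FIX_SET_MIN;
	}

As in the patch, filling from scan is only attempted when the cursor hasn't already covered the range (here reduced to a single boolean), and a dropped node wins over a boundary rewrite only when the overlap is total.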
static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
- struct btree *child)
+ struct btree *child, struct bpos *pulled_from_scan)
{
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
- bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
- bch2_bpos_to_text(&buf2, b->key.k.p);
+ if (bpos_eq(child->key.k.p, b->key.k.p))
+ return 0;
- if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
- btree_node_topology_bad_max_key,
- "btree node with incorrect max_key at btree %s level %u:\n"
- " %s\n"
- " expected %s",
- bch2_btree_id_str(b->c.btree_id), b->c.level,
- buf1.buf, buf2.buf)) {
- ret = set_node_max(c, child, b->key.k.p);
- if (ret)
- goto err;
+ prt_printf(&buf, "at btree %s level %u:\n parent: ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ prt_str(&buf, "\n child: ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));
+
+ if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
+ "btree node with incorrect max_key%s", buf.buf)) {
+ if (b->c.level == 1 &&
+ bpos_lt(*pulled_from_scan, b->key.k.p)) {
+ ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
+ bpos_successor(child->key.k.p), b->key.k.p);
+ if (ret)
+ goto err;
+
+ *pulled_from_scan = b->key.k.p;
+ ret = DID_FILL_FROM_SCAN;
+ } else {
+ ret = set_node_max(c, child, b->key.k.p);
+ }
}
err:
fsck_err:
- printbuf_exit(&buf2);
- printbuf_exit(&buf1);
+ printbuf_exit(&buf);
return ret;
}
-static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b)
+static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b,
+ struct bpos *pulled_from_scan)
{
struct bch_fs *c = trans->c;
struct btree_and_journal_iter iter;
struct bkey_s_c k;
struct bkey_buf prev_k, cur_k;
struct btree *prev = NULL, *cur = NULL;
- bool have_child, dropped_children = false;
+ bool have_child, new_pass = false;
struct printbuf buf = PRINTBUF;
int ret = 0;
if (!b->c.level)
return 0;
-again:
- prev = NULL;
- have_child = dropped_children = false;
+
bch2_bkey_buf_init(&prev_k);
bch2_bkey_buf_init(&cur_k);
+again:
+ cur = prev = NULL;
+ have_child = new_pass = false;
bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
iter.prefetch = true;
@@ -415,11 +332,17 @@ again:
b->c.level - 1,
buf.buf)) {
bch2_btree_node_evict(trans, cur_k.k);
+ cur = NULL;
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
- cur = NULL;
if (ret)
break;
+
+ if (!btree_id_is_alloc(b->c.btree_id)) {
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ if (ret)
+ break;
+ }
continue;
}
@@ -427,7 +350,23 @@ again:
if (ret)
break;
- ret = btree_repair_node_boundaries(c, b, prev, cur);
+ if (bch2_btree_node_is_stale(c, cur)) {
+ bch_info(c, "btree node %s older than nodes found by scanning", buf.buf);
+ six_unlock_read(&cur->c.lock);
+ bch2_btree_node_evict(trans, cur_k.k);
+ ret = bch2_journal_key_delete(c, b->c.btree_id,
+ b->c.level, cur_k.k->k.p);
+ cur = NULL;
+ if (ret)
+ break;
+ continue;
+ }
+
+ ret = btree_check_node_boundaries(c, b, prev, cur, pulled_from_scan);
+ if (ret == DID_FILL_FROM_SCAN) {
+ new_pass = true;
+ ret = 0;
+ }
if (ret == DROP_THIS_NODE) {
six_unlock_read(&cur->c.lock);
@@ -445,6 +384,7 @@ again:
prev = NULL;
if (ret == DROP_PREV_NODE) {
+ bch_info(c, "dropped prev node");
bch2_btree_node_evict(trans, prev_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, prev_k.k->k.p);
@@ -452,8 +392,6 @@ again:
break;
bch2_btree_and_journal_iter_exit(&iter);
- bch2_bkey_buf_exit(&prev_k, c);
- bch2_bkey_buf_exit(&cur_k, c);
goto again;
} else if (ret)
break;
@@ -465,7 +403,11 @@ again:
if (!ret && !IS_ERR_OR_NULL(prev)) {
BUG_ON(cur);
- ret = btree_repair_node_end(c, b, prev);
+ ret = btree_repair_node_end(c, b, prev, pulled_from_scan);
+ if (ret == DID_FILL_FROM_SCAN) {
+ new_pass = true;
+ ret = 0;
+ }
}
if (!IS_ERR_OR_NULL(prev))
@@ -479,6 +421,10 @@ again:
goto err;
bch2_btree_and_journal_iter_exit(&iter);
+
+ if (new_pass)
+ goto again;
+
bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
iter.prefetch = true;
@@ -495,7 +441,7 @@ again:
if (ret)
goto err;
- ret = bch2_btree_repair_topology_recurse(trans, cur);
+ ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
six_unlock_read(&cur->c.lock);
cur = NULL;
@@ -503,7 +449,7 @@ again:
bch2_btree_node_evict(trans, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
- dropped_children = true;
+ new_pass = true;
}
if (ret)
@@ -530,12 +476,14 @@ fsck_err:
six_unlock_read(&cur->c.lock);
bch2_btree_and_journal_iter_exit(&iter);
- bch2_bkey_buf_exit(&prev_k, c);
- bch2_bkey_buf_exit(&cur_k, c);
- if (!ret && dropped_children)
+ if (!ret && new_pass)
goto again;
+ BUG_ON(!ret && bch2_btree_node_check_topology(trans, b));
+
+ bch2_bkey_buf_exit(&prev_k, c);
+ bch2_bkey_buf_exit(&cur_k, c);
printbuf_exit(&buf);
return ret;
}
@@ -543,534 +491,183 @@ fsck_err:
int bch2_check_topology(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree *b;
- unsigned i;
+ struct bpos pulled_from_scan = POS_MIN;
int ret = 0;
- for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
+ bool reconstructed_root = false;
- if (!r->alive)
- continue;
-
- b = r->b;
- if (btree_node_fake(b))
- continue;
-
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
- ret = bch2_btree_repair_topology_recurse(trans, b);
- six_unlock_read(&b->c.lock);
-
- if (ret == DROP_THIS_NODE) {
- bch_err(c, "empty btree root - repair unimplemented");
- ret = -BCH_ERR_fsck_repair_unimplemented;
- }
- }
-
- bch2_trans_put(trans);
-
- return ret;
-}
-
-static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id,
- unsigned level, bool is_root,
- struct bkey_s_c *k)
-{
- struct bch_fs *c = trans->c;
- struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(*k);
- const union bch_extent_entry *entry_c;
- struct extent_ptr_decoded p = { 0 };
- bool do_update = false;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- /*
- * XXX
- * use check_bucket_ref here
- */
- bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr);
-
- if (fsck_err_on(!g->gen_valid,
- c, ptr_to_missing_alloc_key,
- "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
- p.ptr.gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
- if (!p.ptr.cached) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- } else {
- do_update = true;
- }
- }
-
- if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
- c, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
- p.ptr.gen, g->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
- if (!p.ptr.cached) {
- g->gen_valid = true;
- g->gen = p.ptr.gen;
- g->data_type = 0;
- g->dirty_sectors = 0;
- g->cached_sectors = 0;
- set_bit(BCH_FS_need_another_gc, &c->flags);
- } else {
- do_update = true;
- }
- }
+ if (r->error) {
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ if (ret)
+ break;
+reconstruct_root:
+ bch_info(c, "btree root %s unreadable, must recover from scan", bch2_btree_id_str(i));
- if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
- c, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
- bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
- p.ptr.gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
- do_update = true;
-
- if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
- c, stale_dirty_ptr,
- "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
- p.ptr.gen, g->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
- do_update = true;
-
- if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
- continue;
+ r->alive = false;
+ r->error = 0;
- if (fsck_err_on(bucket_data_type(g->data_type) &&
- bucket_data_type(g->data_type) != data_type, c,
- ptr_bucket_data_type_mismatch,
- "bucket %u:%zu different types of data in same bucket: %s, %s\n"
- "while marking %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- bch2_data_type_str(g->data_type),
- bch2_data_type_str(data_type),
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
- if (data_type == BCH_DATA_btree) {
- g->data_type = data_type;
- set_bit(BCH_FS_need_another_gc, &c->flags);
+ if (!bch2_btree_has_scanned_nodes(c, i)) {
+ mustfix_fsck_err(c, btree_root_unreadable_and_scan_found_nothing,
+ "no nodes found for btree %s, continue?", bch2_btree_id_str(i));
+ bch2_btree_root_alloc_fake_trans(trans, i, 0);
} else {
- do_update = true;
+ bch2_btree_root_alloc_fake_trans(trans, i, 1);
+ bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
+ if (ret)
+ break;
}
- }
-
- if (p.has_ec) {
- struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
-
- if (fsck_err_on(!m || !m->alive, c,
- ptr_to_missing_stripe,
- "pointer to nonexistent stripe %llu\n"
- "while marking %s",
- (u64) p.ec.idx,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
- do_update = true;
-
- if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
- ptr_to_incorrect_stripe,
- "pointer does not match stripe %llu\n"
- "while marking %s",
- (u64) p.ec.idx,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
- do_update = true;
- }
- }
-
- if (do_update) {
- struct bkey_ptrs ptrs;
- union bch_extent_entry *entry;
- struct bch_extent_ptr *ptr;
- struct bkey_i *new;
- if (is_root) {
- bch_err(c, "cannot update btree roots yet");
- ret = -EINVAL;
- goto err;
- }
-
- new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
- if (!new) {
- ret = -BCH_ERR_ENOMEM_gc_repair_key;
- bch_err_msg(c, ret, "allocating new key");
- goto err;
+ reconstructed_root = true;
}
- bkey_reassemble(new, *k);
+ struct btree *b = r->b;
- if (level) {
- /*
- * We don't want to drop btree node pointers - if the
- * btree node isn't there anymore, the read path will
- * sort it out:
- */
- ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
- bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_GC_BUCKET(ca, ptr);
-
- ptr->gen = g->gen;
- }
- } else {
- bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket *g = PTR_GC_BUCKET(ca, ptr);
- enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);
-
- (ptr->cached &&
- (!g->gen_valid || gen_cmp(ptr->gen, g->gen) > 0)) ||
- (!ptr->cached &&
- gen_cmp(ptr->gen, g->gen) < 0) ||
- gen_cmp(g->gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
- (g->data_type &&
- g->data_type != data_type);
- }));
-again:
- ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
- bkey_extent_entry_for_each(ptrs, entry) {
- if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
- struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
- entry->stripe_ptr.idx);
- union bch_extent_entry *next_ptr;
-
- bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
- if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
- goto found;
- next_ptr = NULL;
-found:
- if (!next_ptr) {
- bch_err(c, "aieee, found stripe ptr with no data ptr");
- continue;
- }
-
- if (!m || !m->alive ||
- !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
- &next_ptr->ptr,
- m->sectors)) {
- bch2_bkey_extent_entry_drop(new, entry);
- goto again;
- }
- }
- }
- }
+ btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
+ ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
+ six_unlock_read(&b->c.lock);
- ret = bch2_journal_key_insert_take(c, btree_id, level, new);
- if (ret) {
- kfree(new);
- goto err;
- }
+ if (ret == DROP_THIS_NODE) {
+ bch2_btree_node_hash_remove(&c->btree_cache, b);
+ mutex_lock(&c->btree_cache.lock);
+ list_move(&b->list, &c->btree_cache.freeable);
+ mutex_unlock(&c->btree_cache.lock);
- if (level)
- bch2_btree_node_update_key_early(trans, btree_id, level - 1, *k, new);
+ r->b = NULL;
- if (0) {
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, *k);
- bch_info(c, "updated %s", buf.buf);
+ if (!reconstructed_root)
+ goto reconstruct_root;
- printbuf_reset(&buf);
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
- bch_info(c, "new key %s", buf.buf);
+ bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
+ bch2_btree_root_alloc_fake_trans(trans, i, 0);
+ r->alive = false;
+ ret = 0;
}
-
- *k = bkey_i_to_s_c(new);
}
-err:
fsck_err:
- printbuf_exit(&buf);
+ bch2_trans_put(trans);
return ret;
}
/* marking of btree keys/nodes: */
static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
- unsigned level, bool is_root,
- struct bkey_s_c *k,
+ unsigned level, struct btree **prev,
+ struct btree_iter *iter, struct bkey_s_c k,
bool initial)
{
struct bch_fs *c = trans->c;
+
+ if (iter) {
+ struct btree_path *path = btree_iter_path(trans, iter);
+ struct btree *b = path_l(path)->b;
+
+ if (*prev != b) {
+ int ret = bch2_btree_node_check_topology(trans, b);
+ if (ret)
+ return ret;
+ }
+ *prev = b;
+ }
+
struct bkey deleted = KEY(0, 0, 0);
struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
+ struct printbuf buf = PRINTBUF;
int ret = 0;
- deleted.p = k->k->p;
+ deleted.p = k.k->p;
if (initial) {
BUG_ON(bch2_journal_seq_verify &&
- k->k->version.lo > atomic64_read(&c->journal.seq));
+ k.k->version.lo > atomic64_read(&c->journal.seq));
- ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k);
- if (ret)
- goto err;
-
- if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
+ if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c,
bkey_version_in_future,
"key version number higher than recorded: %llu > %llu",
- k->k->version.lo,
+ k.k->version.lo,
atomic64_read(&c->key_version)))
- atomic64_set(&c->key_version, k->k->version.lo);
+ atomic64_set(&c->key_version, k.k->version.lo);
}
- ret = commit_do(trans, NULL, NULL, 0,
- bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
-fsck_err:
-err:
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
-{
- struct bch_fs *c = trans->c;
- struct btree_node_iter iter;
- struct bkey unpacked;
- struct bkey_s_c k;
- struct bkey_buf prev, cur;
- int ret = 0;
-
- if (!btree_node_type_needs_gc(btree_node_type(b)))
- return 0;
-
- bch2_btree_node_iter_init_from_start(&iter, b);
- bch2_bkey_buf_init(&prev);
- bch2_bkey_buf_init(&cur);
- bkey_init(&prev.k->k);
-
- while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
- ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
- &k, initial);
- if (ret)
- break;
-
- bch2_btree_node_iter_advance(&iter, b);
-
- if (b->c.level) {
- bch2_bkey_buf_reassemble(&cur, c, k);
-
- ret = bch2_gc_check_topology(c, b, &prev, cur,
- bch2_btree_node_iter_end(&iter));
- if (ret)
- break;
- }
+ if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
+ c, btree_bitmap_not_marked,
+ "btree ptr not marked in member info btree allocated bitmap\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k),
+ buf.buf))) {
+ mutex_lock(&c->sb_lock);
+ bch2_dev_btree_bitmap_mark(c, k);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
}
- bch2_bkey_buf_exit(&cur, c);
- bch2_bkey_buf_exit(&prev, c);
- return ret;
-}
-
-static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
- bool initial, bool metadata_only)
-{
- struct bch_fs *c = trans->c;
- struct btree_iter iter;
- struct btree *b;
- unsigned depth = metadata_only ? 1 : 0;
- int ret = 0;
-
- gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
-
- __for_each_btree_node(trans, iter, btree_id, POS_MIN,
- 0, depth, BTREE_ITER_PREFETCH, b, ret) {
- bch2_verify_btree_nr_keys(b);
-
- gc_pos_set(c, gc_pos_btree_node(b));
-
- ret = btree_gc_mark_node(trans, b, initial);
- if (ret)
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
+ /*
+ * We require a commit before key_trigger() because
+ * key_trigger(BTREE_TRIGGER_gc) is not idempotent; we'll calculate the
+ * wrong result if we run it multiple times.
+ */
+ unsigned flags = !iter ? BTREE_TRIGGER_is_root : 0;
+ ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
+ BTREE_TRIGGER_check_repair|flags);
if (ret)
- return ret;
-
- mutex_lock(&c->btree_root_lock);
- b = bch2_btree_id_root(c, btree_id)->b;
- if (!btree_node_fake(b)) {
- struct bkey_s_c k = bkey_i_to_s_c(&b->key);
-
- ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1,
- true, &k, initial);
- }
- gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
- mutex_unlock(&c->btree_root_lock);
-
- return ret;
-}
-
-static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
- unsigned target_depth)
-{
- struct bch_fs *c = trans->c;
- struct btree_and_journal_iter iter;
- struct bkey_s_c k;
- struct bkey_buf cur, prev;
- struct printbuf buf = PRINTBUF;
- int ret = 0;
-
- bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- bch2_bkey_buf_init(&prev);
- bch2_bkey_buf_init(&cur);
- bkey_init(&prev.k->k);
-
- while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- BUG_ON(bpos_lt(k.k->p, b->data->min_key));
- BUG_ON(bpos_gt(k.k->p, b->data->max_key));
-
- ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
- false, &k, true);
- if (ret)
- goto fsck_err;
-
- if (b->c.level) {
- bch2_bkey_buf_reassemble(&cur, c, k);
- k = bkey_i_to_s_c(cur.k);
-
- bch2_btree_and_journal_iter_advance(&iter);
+ goto out;
- ret = bch2_gc_check_topology(c, b,
- &prev, cur,
- !bch2_btree_and_journal_iter_peek(&iter).k);
- if (ret)
- goto fsck_err;
- } else {
- bch2_btree_and_journal_iter_advance(&iter);
- }
+ if (trans->nr_updates) {
+ ret = bch2_trans_commit(trans, NULL, NULL, 0) ?:
+ -BCH_ERR_transaction_restart_nested;
+ goto out;
}
- if (b->c.level > target_depth) {
- bch2_btree_and_journal_iter_exit(&iter);
- bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- iter.prefetch = true;
-
- while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- struct btree *child;
-
- bch2_bkey_buf_reassemble(&cur, c, k);
- bch2_btree_and_journal_iter_advance(&iter);
-
- child = bch2_btree_node_get_noiter(trans, cur.k,
- b->c.btree_id, b->c.level - 1,
- false);
- ret = PTR_ERR_OR_ZERO(child);
-
- if (bch2_err_matches(ret, EIO)) {
- bch2_topology_error(c);
-
- if (__fsck_err(c,
- FSCK_CAN_FIX|
- FSCK_CAN_IGNORE|
- FSCK_NO_RATELIMIT,
- btree_node_read_error,
- "Unreadable btree node at btree %s level %u:\n"
- " %s",
- bch2_btree_id_str(b->c.btree_id),
- b->c.level - 1,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
- should_restart_for_topology_repair(c)) {
- bch_info(c, "Halting mark and sweep to start topology repair pass");
- ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
- goto fsck_err;
- } else {
- /* Continue marking when opted to not
- * fix the error: */
- ret = 0;
- set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
- continue;
- }
- } else if (ret) {
- bch_err_msg(c, ret, "getting btree node");
- break;
- }
-
- ret = bch2_gc_btree_init_recurse(trans, child,
- target_depth);
- six_unlock_read(&child->c.lock);
-
- if (ret)
- break;
- }
- }
+ ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
+ BTREE_TRIGGER_gc|flags);
+out:
fsck_err:
- bch2_bkey_buf_exit(&cur, c);
- bch2_bkey_buf_exit(&prev, c);
- bch2_btree_and_journal_iter_exit(&iter);
printbuf_exit(&buf);
+ bch_err_fn(c, ret);
return ret;
}
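The commit-before-gc-trigger ordering matters because the gc trigger accumulates into gc-only bucket counters: if the transaction restarted after running it, the retry would run it again and double-count. A toy illustration of the non-idempotence (assumptions only, not bcachefs code):

	#include <assert.h>

	struct toy_gc_bucket { unsigned dirty_sectors; };

	/* Accumulating trigger: running it twice is not the same as once. */
	static void toy_gc_trigger(struct toy_gc_bucket *g, unsigned sectors)
	{
		g->dirty_sectors += sectors;
	}

	int main(void)
	{
		struct toy_gc_bucket g = { 0 };

		toy_gc_trigger(&g, 8);
		toy_gc_trigger(&g, 8);		/* replay after a restart */
		assert(g.dirty_sectors == 16);	/* double-counted */
		return 0;
	}

Hence any repair updates queued by BTREE_TRIGGER_check_repair are committed first (returning a nested restart), so that on the retry the gc trigger runs exactly once per key.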
-static int bch2_gc_btree_init(struct btree_trans *trans,
- enum btree_id btree_id,
- bool metadata_only)
+static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool initial)
{
struct bch_fs *c = trans->c;
- struct btree *b;
- unsigned target_depth = metadata_only ? 1 : 0;
- struct printbuf buf = PRINTBUF;
+ int level = 0, target_depth = btree_node_type_needs_gc(__btree_node_type(0, btree)) ? 0 : 1;
int ret = 0;
- b = bch2_btree_id_root(c, btree_id)->b;
-
- if (btree_node_fake(b))
- return 0;
-
- six_lock_read(&b->c.lock, NULL, NULL);
- printbuf_reset(&buf);
- bch2_bpos_to_text(&buf, b->data->min_key);
- if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
- btree_root_bad_min_key,
- "btree root with incorrect min_key: %s", buf.buf)) {
- bch_err(c, "repair unimplemented");
- ret = -BCH_ERR_fsck_repair_unimplemented;
- goto fsck_err;
- }
+ /* We need to make sure every leaf node is readable before going RW */
+ if (initial)
+ target_depth = 0;
- printbuf_reset(&buf);
- bch2_bpos_to_text(&buf, b->data->max_key);
- if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
- btree_root_bad_max_key,
- "btree root with incorrect max_key: %s", buf.buf)) {
- bch_err(c, "repair unimplemented");
- ret = -BCH_ERR_fsck_repair_unimplemented;
- goto fsck_err;
+ /* root */
+ mutex_lock(&c->btree_root_lock);
+ struct btree *b = bch2_btree_id_root(c, btree)->b;
+ if (!btree_node_fake(b)) {
+ gc_pos_set(c, gc_pos_btree(btree, b->c.level + 1, SPOS_MAX));
+ ret = lockrestart_do(trans,
+ bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1,
+ NULL, NULL, bkey_i_to_s_c(&b->key), initial));
+ level = b->c.level;
}
+ mutex_unlock(&c->btree_root_lock);
- if (b->c.level >= target_depth)
- ret = bch2_gc_btree_init_recurse(trans, b, target_depth);
+ if (ret)
+ return ret;
- if (!ret) {
- struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+ for (; level >= target_depth; --level) {
+ struct btree *prev = NULL;
+ struct btree_iter iter;
+ bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, level,
+ BTREE_ITER_prefetch);
- ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, true,
- &k, true);
+ ret = for_each_btree_key_continue(trans, iter, 0, k, ({
+ gc_pos_set(c, gc_pos_btree(btree, level, k.k->p));
+ bch2_gc_mark_key(trans, btree, level, &prev, &iter, k, initial);
+ }));
+ if (ret)
+ break;
}
-fsck_err:
- six_unlock_read(&b->c.lock);
- bch_err_fn(c, ret);
- printbuf_exit(&buf);
return ret;
}
@@ -1080,7 +677,7 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
(int) btree_id_to_gc_phase(r);
}
-static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
+static int bch2_gc_btrees(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
enum btree_id ids[BTREE_ID_NR];
@@ -1091,98 +688,36 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
ids[i] = i;
bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
- for (i = 0; i < BTREE_ID_NR && !ret; i++)
- ret = initial
- ? bch2_gc_btree_init(trans, ids[i], metadata_only)
- : bch2_gc_btree(trans, ids[i], initial, metadata_only);
+ for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+ unsigned btree = i < BTREE_ID_NR ? ids[i] : i;
- for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) {
- if (!bch2_btree_id_root(c, i)->alive)
+ if (IS_ERR_OR_NULL(bch2_btree_id_root(c, btree)->b))
continue;
- ret = initial
- ? bch2_gc_btree_init(trans, i, metadata_only)
- : bch2_gc_btree(trans, i, initial, metadata_only);
- }
+ ret = bch2_gc_btree(trans, btree, true);
+ if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
+ c, btree_node_read_error,
+ "btree node read error for %s",
+ bch2_btree_id_str(btree)))
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
+ }
+fsck_err:
bch2_trans_put(trans);
bch_err_fn(c, ret);
return ret;
}
-static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
- u64 start, u64 end,
- enum bch_data_type type,
- unsigned flags)
-{
- u64 b = sector_to_bucket(ca, start);
-
- do {
- unsigned sectors =
- min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
-
- bch2_mark_metadata_bucket(c, ca, b, type, sectors,
- gc_phase(GC_PHASE_SB), flags);
- b++;
- start += sectors;
- } while (start < end);
-}
-
-static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
- unsigned flags)
-{
- struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
- unsigned i;
- u64 b;
-
- for (i = 0; i < layout->nr_superblocks; i++) {
- u64 offset = le64_to_cpu(layout->sb_offset[i]);
-
- if (offset == BCH_SB_SECTOR)
- mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
- BCH_DATA_sb, flags);
-
- mark_metadata_sectors(c, ca, offset,
- offset + (1 << layout->sb_max_size_bits),
- BCH_DATA_sb, flags);
- }
-
- for (i = 0; i < ca->journal.nr; i++) {
- b = ca->journal.buckets[i];
- bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
- ca->mi.bucket_size,
- gc_phase(GC_PHASE_SB), flags);
- }
-}
-
-static void bch2_mark_superblocks(struct bch_fs *c)
+static int bch2_mark_superblocks(struct bch_fs *c)
{
mutex_lock(&c->sb_lock);
gc_pos_set(c, gc_phase(GC_PHASE_SB));
- for_each_online_member(c, ca)
- bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
+ int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
mutex_unlock(&c->sb_lock);
+ return ret;
}
-#if 0
-/* Also see bch2_pending_btree_node_free_insert_done() */
-static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
-{
- struct btree_update *as;
- struct pending_btree_node_free *d;
-
- mutex_lock(&c->btree_interior_update_lock);
- gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
-
- for_each_pending_btree_node_free(c, as, d)
- if (d->index_update_done)
- bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);
-
- mutex_unlock(&c->btree_interior_update_lock);
-}
-#endif
-
static void bch2_gc_free(struct bch_fs *c)
{
genradix_free(&c->reflink_gc_table);
@@ -1200,28 +735,23 @@ static void bch2_gc_free(struct bch_fs *c)
c->usage_gc = NULL;
}
-static int bch2_gc_done(struct bch_fs *c,
- bool initial, bool metadata_only)
+static int bch2_gc_done(struct bch_fs *c)
{
struct bch_dev *ca = NULL;
struct printbuf buf = PRINTBUF;
- bool verify = !metadata_only &&
- !c->opts.reconstruct_alloc &&
- (!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
unsigned i;
int ret = 0;
percpu_down_write(&c->mark_lock);
-#define copy_field(_err, _f, _msg, ...) \
- if (dst->_f != src->_f && \
- (!verify || \
- fsck_err(c, _err, _msg ": got %llu, should be %llu" \
- , ##__VA_ARGS__, dst->_f, src->_f))) \
+#define copy_field(_err, _f, _msg, ...) \
+ if (fsck_err_on(dst->_f != src->_f, c, _err, \
+ _msg ": got %llu, should be %llu" , ##__VA_ARGS__, \
+ dst->_f, src->_f)) \
dst->_f = src->_f
-#define copy_dev_field(_err, _f, _msg, ...) \
+#define copy_dev_field(_err, _f, _msg, ...) \
copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
-#define copy_fs_field(_err, _f, _msg, ...) \
+#define copy_fs_field(_err, _f, _msg, ...) \
copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
@@ -1254,31 +784,24 @@ static int bch2_gc_done(struct bch_fs *c,
copy_fs_field(fs_usage_btree_wrong,
b.btree, "btree");
- if (!metadata_only) {
- copy_fs_field(fs_usage_data_wrong,
- b.data, "data");
- copy_fs_field(fs_usage_cached_wrong,
- b.cached, "cached");
- copy_fs_field(fs_usage_reserved_wrong,
- b.reserved, "reserved");
- copy_fs_field(fs_usage_nr_inodes_wrong,
- b.nr_inodes,"nr_inodes");
-
- for (i = 0; i < BCH_REPLICAS_MAX; i++)
- copy_fs_field(fs_usage_persistent_reserved_wrong,
- persistent_reserved[i],
- "persistent_reserved[%i]", i);
- }
+ copy_fs_field(fs_usage_data_wrong,
+ b.data, "data");
+ copy_fs_field(fs_usage_cached_wrong,
+ b.cached, "cached");
+ copy_fs_field(fs_usage_reserved_wrong,
+ b.reserved, "reserved");
+ copy_fs_field(fs_usage_nr_inodes_wrong,
+ b.nr_inodes, "nr_inodes");
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ copy_fs_field(fs_usage_persistent_reserved_wrong,
+ persistent_reserved[i],
+ "persistent_reserved[%i]", i);
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry_v1 *e =
cpu_replicas_entry(&c->replicas, i);
- if (metadata_only &&
- (e->data_type == BCH_DATA_user ||
- e->data_type == BCH_DATA_cached))
- continue;
-
printbuf_reset(&buf);
bch2_replicas_entry_to_text(&buf, e);
@@ -1292,10 +815,8 @@ static int bch2_gc_done(struct bch_fs *c,
#undef copy_stripe_field
#undef copy_field
fsck_err:
- if (ca)
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
bch_err_fn(c, ret);
-
percpu_up_write(&c->mark_lock);
printbuf_exit(&buf);
return ret;
@@ -1318,7 +839,7 @@ static int bch2_gc_start(struct bch_fs *c)
ca->usage_gc = alloc_percpu(struct bch_dev_usage);
if (!ca->usage_gc) {
bch_err(c, "error allocating ca->usage_gc");
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return -BCH_ERR_ENOMEM_gc_start;
}
@@ -1329,19 +850,6 @@ static int bch2_gc_start(struct bch_fs *c)
return 0;
}
-static int bch2_gc_reset(struct bch_fs *c)
-{
- for_each_member_device(c, ca) {
- free_percpu(ca->usage_gc);
- ca->usage_gc = NULL;
- }
-
- free_percpu(c->usage_gc);
- c->usage_gc = NULL;
-
- return bch2_gc_start(c);
-}
-
/* returns true if not equal */
static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
struct bch_alloc_v4 r)
@@ -1357,56 +865,41 @@ static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
static int bch2_alloc_write_key(struct btree_trans *trans,
struct btree_iter *iter,
- struct bkey_s_c k,
- bool metadata_only)
+ struct bch_dev *ca,
+ struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
- struct bucket old_gc, gc, *b;
struct bkey_i_alloc_v4 *a;
- struct bch_alloc_v4 old_convert, new;
+ struct bch_alloc_v4 old_gc, gc, old_convert, new;
const struct bch_alloc_v4 *old;
int ret;
old = bch2_alloc_to_v4(k, &old_convert);
- new = *old;
+ gc = new = *old;
percpu_down_read(&c->mark_lock);
- b = gc_bucket(ca, iter->pos.offset);
- old_gc = *b;
+ __bucket_m_to_alloc(&gc, *gc_bucket(ca, iter->pos.offset));
+
+ old_gc = gc;
if ((old->data_type == BCH_DATA_sb ||
old->data_type == BCH_DATA_journal) &&
!bch2_dev_is_online(ca)) {
- b->data_type = old->data_type;
- b->dirty_sectors = old->dirty_sectors;
+ gc.data_type = old->data_type;
+ gc.dirty_sectors = old->dirty_sectors;
}
/*
- * b->data_type doesn't yet include need_discard & need_gc_gen states -
+ * gc.data_type doesn't yet include need_discard & need_gc_gen states -
* fix that here:
*/
- b->data_type = __alloc_data_type(b->dirty_sectors,
- b->cached_sectors,
- b->stripe,
- *old,
- b->data_type);
- gc = *b;
+ alloc_data_type_set(&gc, gc.data_type);
if (gc.data_type != old_gc.data_type ||
gc.dirty_sectors != old_gc.dirty_sectors)
- bch2_dev_usage_update_m(c, ca, &old_gc, &gc);
+ bch2_dev_usage_update(c, ca, &old_gc, &gc, 0, true);
percpu_up_read(&c->mark_lock);
- if (metadata_only &&
- gc.data_type != BCH_DATA_sb &&
- gc.data_type != BCH_DATA_journal &&
- gc.data_type != BCH_DATA_btree)
- return 0;
-
- if (gen_after(old->gen, gc.gen))
- return 0;
-
if (fsck_err_on(new.data_type != gc.data_type, c,
alloc_key_data_type_wrong,
"bucket %llu:%llu gen %u has wrong data_type"
@@ -1456,12 +949,12 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
- ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
+ ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
fsck_err:
return ret;
}
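__bucket_m_to_conv() is not shown here; __bucket_m_to_alloc() converts the in-memory gc bucket into bch_alloc_v4 form so the comparison below is field-for-field. Presumably it is a plain copy along these lines (a sketch; the real helper lives elsewhere in this series):

	static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst,
					       struct bucket b)
	{
		dst->gen		= b.gen;
		dst->data_type		= b.data_type;
		dst->dirty_sectors	= b.dirty_sectors;
		dst->cached_sectors	= b.cached_sectors;
		dst->stripe		= b.stripe;
	}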
-static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_alloc_done(struct bch_fs *c)
{
int ret = 0;
@@ -1470,11 +963,11 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
POS(ca->dev_idx, ca->mi.first_bucket),
POS(ca->dev_idx, ca->mi.nbuckets - 1),
- BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_slots|BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
- bch2_alloc_write_key(trans, &iter, k, metadata_only)));
+ bch2_alloc_write_key(trans, &iter, ca, k)));
if (ret) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
break;
}
}
@@ -1483,14 +976,14 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
return ret;
}
-static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_alloc_start(struct bch_fs *c)
{
for_each_member_device(c, ca) {
struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
ca->mi.nbuckets * sizeof(struct bucket),
GFP_KERNEL|__GFP_ZERO);
if (!buckets) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
bch_err(c, "error allocating ca->buckets[gc]");
return -BCH_ERR_ENOMEM_gc_alloc_start;
}
@@ -1500,54 +993,29 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
rcu_assign_pointer(ca->buckets_gc, buckets);
}
+ struct bch_dev *ca = NULL;
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
- BTREE_ITER_PREFETCH, k, ({
- struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
- struct bucket *g = gc_bucket(ca, k.k->p.offset);
+ BTREE_ITER_prefetch, k, ({
+ ca = bch2_dev_iterate(c, ca, k.k->p.inode);
+ if (!ca) {
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ continue;
+ }
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+ struct bucket *g = gc_bucket(ca, k.k->p.offset);
g->gen_valid = 1;
g->gen = a->gen;
-
- if (metadata_only &&
- (a->data_type == BCH_DATA_user ||
- a->data_type == BCH_DATA_cached ||
- a->data_type == BCH_DATA_parity)) {
- g->data_type = a->data_type;
- g->dirty_sectors = a->dirty_sectors;
- g->cached_sectors = a->cached_sectors;
- g->stripe = a->stripe;
- g->stripe_redundancy = a->stripe_redundancy;
- }
-
0;
})));
+ bch2_dev_put(ca);
bch_err_fn(c, ret);
return ret;
}
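bch2_dev_iterate() replaces the unchecked bch_dev_bkey_exists() lookup: it caches the device ref across keys and returns NULL for a device index that no longer exists, letting the loop skip the whole device by bumping the iterator position. Presumably it is shaped like this (an assumption; the helper is introduced elsewhere in this series):

	static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned dev_idx)
	{
		if (ca && ca->dev_idx == dev_idx)	/* still on the same device */
			return ca;
		bch2_dev_put(ca);			/* drop ref on the previous one */
		return bch2_dev_tryget(c, dev_idx);	/* NULL if dev_idx is gone */
	}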
-static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
-{
- for_each_member_device(c, ca) {
- struct bucket_array *buckets = gc_bucket_array(ca);
- struct bucket *g;
-
- for_each_bucket(g, buckets) {
- if (metadata_only &&
- (g->data_type == BCH_DATA_user ||
- g->data_type == BCH_DATA_cached ||
- g->data_type == BCH_DATA_parity))
- continue;
- g->data_type = 0;
- g->dirty_sectors = 0;
- g->cached_sectors = 0;
- }
- }
-}
-
static int bch2_gc_write_reflink_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
@@ -1583,7 +1051,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
- return ret;
+ goto out;
if (!r->refcount)
new->k.type = KEY_TYPE_deleted;
@@ -1591,40 +1059,33 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
*bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
ret = bch2_trans_update(trans, iter, new, 0);
}
+out:
fsck_err:
printbuf_exit(&buf);
return ret;
}
-static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_reflink_done(struct bch_fs *c)
{
size_t idx = 0;
- if (metadata_only)
- return 0;
-
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
c->reflink_gc_nr = 0;
return ret;
}
-static int bch2_gc_reflink_start(struct bch_fs *c,
- bool metadata_only)
+static int bch2_gc_reflink_start(struct bch_fs *c)
{
-
- if (metadata_only)
- return 0;
-
c->reflink_gc_nr = 0;
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
- BTREE_ITER_PREFETCH, k, ({
+ BTREE_ITER_prefetch, k, ({
const __le64 *refcount = bkey_refcount_c(k);
if (!refcount)
@@ -1647,15 +1108,6 @@ static int bch2_gc_reflink_start(struct bch_fs *c,
return ret;
}
-static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
-{
- struct genradix_iter iter;
- struct reflink_gc *r;
-
- genradix_for_each(&c->reflink_gc_table, iter, r)
- r->refcount = 0;
-}
-
static int bch2_gc_write_stripes_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
@@ -1709,30 +1161,20 @@ fsck_err:
return ret;
}
-static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
+static int bch2_gc_stripes_done(struct bch_fs *c)
{
- if (metadata_only)
- return 0;
-
return bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_gc_write_stripes_key(trans, &iter, k)));
}
-static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
-{
- genradix_free(&c->gc_stripes);
-}
-
/**
- * bch2_gc - walk _all_ references to buckets, and recompute them:
+ * bch2_check_allocations - walk all references to buckets, and recompute them:
*
* @c: filesystem object
- * @initial: are we in recovery?
- * @metadata_only: are we just checking metadata references, or everything?
*
* Returns: 0 on success, or standard errcode on failure
*
@@ -1751,9 +1193,8 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
* move around - if references move backwards in the ordering GC
* uses, GC could skip past them
*/
-int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
+int bch2_check_allocations(struct bch_fs *c)
{
- unsigned iter = 0;
int ret;
lockdep_assert_held(&c->state_lock);
@@ -1763,62 +1204,30 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
bch2_btree_interior_updates_flush(c);
ret = bch2_gc_start(c) ?:
- bch2_gc_alloc_start(c, metadata_only) ?:
- bch2_gc_reflink_start(c, metadata_only);
+ bch2_gc_alloc_start(c) ?:
+ bch2_gc_reflink_start(c);
if (ret)
goto out;
-again:
- gc_pos_set(c, gc_phase(GC_PHASE_START));
- bch2_mark_superblocks(c);
+ gc_pos_set(c, gc_phase(GC_PHASE_START));
- ret = bch2_gc_btrees(c, initial, metadata_only);
+ ret = bch2_mark_superblocks(c);
+ BUG_ON(ret);
+ ret = bch2_gc_btrees(c);
if (ret)
goto out;
-#if 0
- bch2_mark_pending_btree_node_frees(c);
-#endif
c->gc_count++;
- if (test_bit(BCH_FS_need_another_gc, &c->flags) ||
- (!iter && bch2_test_restart_gc)) {
- if (iter++ > 2) {
- bch_info(c, "Unable to fix bucket gens, looping");
- ret = -EINVAL;
- goto out;
- }
-
- /*
- * XXX: make sure gens we fixed got saved
- */
- bch_info(c, "Second GC pass needed, restarting:");
- clear_bit(BCH_FS_need_another_gc, &c->flags);
- __gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
-
- bch2_gc_stripes_reset(c, metadata_only);
- bch2_gc_alloc_reset(c, metadata_only);
- bch2_gc_reflink_reset(c, metadata_only);
- ret = bch2_gc_reset(c);
- if (ret)
- goto out;
-
- /* flush fsck errors, reset counters */
- bch2_flush_fsck_errs(c);
- goto again;
- }
+ bch2_journal_block(&c->journal);
out:
- if (!ret) {
- bch2_journal_block(&c->journal);
+ ret = bch2_gc_alloc_done(c) ?:
+ bch2_gc_done(c) ?:
+ bch2_gc_stripes_done(c) ?:
+ bch2_gc_reflink_done(c);
- ret = bch2_gc_alloc_done(c, metadata_only) ?:
- bch2_gc_done(c, initial, metadata_only) ?:
- bch2_gc_stripes_done(c, metadata_only) ?:
- bch2_gc_reflink_done(c, metadata_only);
-
- bch2_journal_unblock(&c->journal);
- }
+ bch2_journal_unblock(&c->journal);
percpu_down_write(&c->mark_lock);
/* Indicates that gc is no longer in progress: */
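
With the retry loop gone, bch2_check_allocations() is a straight-line sequence of steps chained with the kernel's ret = a() ?: b() ?: c() idiom, with the journal blocked around the write-back phase so the _done passes see a stable view. The chain relies on the GNU C omitted-middle-operand conditional: x ?: y yields x when x is nonzero, else y, so evaluation stops at the first failing step. A stand-alone demo (step functions are made up):

#include <stdio.h>

static int step1(void) { return 0; }
static int step2(void) { return -22; }	/* fails with -EINVAL */
static int step3(void) { puts("never runs"); return 0; }

int main(void)
{
	int ret = step1() ?: step2() ?: step3();

	printf("ret = %d\n", ret);	/* prints -22; step3 is skipped */
	return 0;
}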
@@ -1847,23 +1256,33 @@ static int gc_btree_gens_key(struct btree_trans *trans,
struct bkey_i *u;
int ret;
+ if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
+ return -EROFS;
+
percpu_down_read(&c->mark_lock);
+ rcu_read_lock();
bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ if (!ca)
+ continue;
- if (ptr_stale(ca, ptr) > 16) {
+ if (dev_ptr_stale(ca, ptr) > 16) {
+ rcu_read_unlock();
percpu_up_read(&c->mark_lock);
goto update;
}
}
bkey_for_each_ptr(ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
+ struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ if (!ca)
+ continue;
+ u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
if (gen_after(*gen, ptr->gen))
*gen = ptr->gen;
}
+ rcu_read_unlock();
percpu_up_read(&c->mark_lock);
return 0;
update:
@@ -1876,10 +1295,9 @@ update:
return 0;
}
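
dev_ptr_stale() compares a pointer's generation against the bucket's current generation; generations are u8s that wrap, so the comparison is done with signed 8-bit subtraction, and the > 16 threshold above tolerates a small lag before forcing a rewrite. gen_cmp() below mirrors the kernel helper of the same name:

#include <stdio.h>
#include <stdint.h>

/* wraparound-safe generation comparison, as in fs/bcachefs */
static int8_t gen_cmp(uint8_t a, uint8_t b)
{
	return (int8_t) (a - b);
}

int main(void)
{
	uint8_t bucket_gen = 5, ptr_gen = 250;	/* bucket gen wrapped past the ptr */

	/* prints 11, not -245: the pointer is 11 generations stale */
	printf("staleness %d\n", gen_cmp(bucket_gen, ptr_gen));
	return 0;
}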
-static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_s_c k)
+static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct bch_dev *ca,
+ struct btree_iter *iter, struct bkey_s_c k)
{
- struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
struct bkey_i_alloc_v4 *a_mut;
@@ -1894,7 +1312,7 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
return ret;
a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
- a_mut->v.data_type = alloc_data_type(a_mut->v, a_mut->v.data_type);
+ alloc_data_type_set(&a_mut->v, a_mut->v.data_type);
return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
}
@@ -1922,7 +1340,7 @@ int bch2_gc_gens(struct bch_fs *c)
ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
if (!ca->oldest_gen) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
ret = -BCH_ERR_ENOMEM_gc_gens;
goto err;
}
@@ -1940,7 +1358,7 @@ int bch2_gc_gens(struct bch_fs *c)
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, i,
POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@@ -1949,14 +1367,23 @@ int bch2_gc_gens(struct bch_fs *c)
goto err;
}
+ struct bch_dev *ca = NULL;
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
POS_MIN,
- BTREE_ITER_PREFETCH,
+ BTREE_ITER_prefetch,
k,
NULL, NULL,
- BCH_TRANS_COMMIT_no_enospc,
- bch2_alloc_write_oldest_gen(trans, &iter, k)));
+ BCH_TRANS_COMMIT_no_enospc, ({
+ ca = bch2_dev_iterate(c, ca, k.k->p.inode);
+ if (!ca) {
+ bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ continue;
+ }
+ bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
+ })));
+ bch2_dev_put(ca);
+
if (ret)
goto err;
@@ -1980,87 +1407,23 @@ err:
return ret;
}
-static int bch2_gc_thread(void *arg)
+static void bch2_gc_gens_work(struct work_struct *work)
{
- struct bch_fs *c = arg;
- struct io_clock *clock = &c->io_clock[WRITE];
- unsigned long last = atomic64_read(&clock->now);
- unsigned last_kick = atomic_read(&c->kick_gc);
-
- set_freezable();
-
- while (1) {
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
- return 0;
- }
-
- if (atomic_read(&c->kick_gc) != last_kick)
- break;
-
- if (c->btree_gc_periodic) {
- unsigned long next = last + c->capacity / 16;
-
- if (atomic64_read(&clock->now) >= next)
- break;
-
- bch2_io_clock_schedule_timeout(clock, next);
- } else {
- schedule();
- }
-
- try_to_freeze();
- }
- __set_current_state(TASK_RUNNING);
-
- last = atomic64_read(&clock->now);
- last_kick = atomic_read(&c->kick_gc);
-
- /*
- * Full gc is currently incompatible with btree key cache:
- */
-#if 0
- ret = bch2_gc(c, false, false);
-#else
- bch2_gc_gens(c);
-#endif
- debug_check_no_locks_held();
- }
-
- return 0;
+ struct bch_fs *c = container_of(work, struct bch_fs, gc_gens_work);
+ bch2_gc_gens(c);
+ bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
}
-void bch2_gc_thread_stop(struct bch_fs *c)
+void bch2_gc_gens_async(struct bch_fs *c)
{
- struct task_struct *p;
-
- p = c->gc_thread;
- c->gc_thread = NULL;
-
- if (p) {
- kthread_stop(p);
- put_task_struct(p);
- }
+ if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) &&
+ !queue_work(c->write_ref_wq, &c->gc_gens_work))
+ bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
}
-int bch2_gc_thread_start(struct bch_fs *c)
+void bch2_fs_gc_init(struct bch_fs *c)
{
- struct task_struct *p;
-
- if (c->gc_thread)
- return 0;
+ seqcount_init(&c->gc_pos_lock);
- p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
- if (IS_ERR(p)) {
- bch_err_fn(c, PTR_ERR(p));
- return PTR_ERR(p);
- }
-
- get_task_struct(p);
- c->gc_thread = p;
- wake_up_process(p);
- return 0;
+ INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work);
}
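
The dedicated bch-gc kthread is gone; gen GC now runs as a work item on c->write_ref_wq, and bch2_gc_gens_async() uses the usual bcachefs guard: take a write ref so the filesystem cannot finish going read-only while the work is pending, and drop the ref again if the work item was already queued. A single-threaded user-space model of that logic, with all names invented:

#include <stdbool.h>
#include <stdio.h>

static int  write_refs;
static bool going_ro;
static bool work_queued;

static bool write_ref_tryget(void) { if (going_ro) return false; write_refs++; return true; }
static void write_ref_put(void)    { write_refs--; }
static bool queue_work(void)       { if (work_queued) return false; work_queued = true; return true; }

static void gc_gens_async(void)
{
	if (write_ref_tryget() && !queue_work())
		write_ref_put();	/* already queued: drop our ref */
}

static void gc_gens_work(void)
{
	/* ... the actual gen GC would run here ... */
	work_queued = false;
	write_ref_put();		/* ref taken in gc_gens_async() */
}

int main(void)
{
	gc_gens_async();
	gc_gens_async();	/* second kick: ref dropped immediately */
	gc_gens_work();
	printf("refs=%d queued=%d\n", write_refs, work_queued);	/* 0 0 */
	return 0;
}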
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
index 607575f83a00..1b6489d8e0f4 100644
--- a/fs/bcachefs/btree_gc.h
+++ b/fs/bcachefs/btree_gc.h
@@ -6,10 +6,7 @@
#include "btree_types.h"
int bch2_check_topology(struct bch_fs *);
-int bch2_gc(struct bch_fs *, bool, bool);
-int bch2_gc_gens(struct bch_fs *);
-void bch2_gc_thread_stop(struct bch_fs *);
-int bch2_gc_thread_start(struct bch_fs *);
+int bch2_check_allocations(struct bch_fs *);
/*
* For concurrent mark and sweep (with other index updates), we define a total
@@ -37,16 +34,16 @@ static inline struct gc_pos gc_phase(enum gc_phase phase)
{
return (struct gc_pos) {
.phase = phase,
- .pos = POS_MIN,
.level = 0,
+ .pos = POS_MIN,
};
}
static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
- return cmp_int(l.phase, r.phase) ?:
- bpos_cmp(l.pos, r.pos) ?:
- cmp_int(l.level, r.level);
+ return cmp_int(l.phase, r.phase) ?:
+ -cmp_int(l.level, r.level) ?:
+ bpos_cmp(l.pos, r.pos);
}
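
gc_pos_cmp() composes per-field comparisons with ?:: each cmp_int() returns -1/0/+1 and the first nonzero term decides. Negating the level term reverses that field's order, so at equal phase a higher level now sorts earlier and pos only breaks ties within a level — which appears intended to match the rewritten per-level mark order. Sketch of the composition:

#include <stdio.h>

#define cmp_int(a, b)	(((a) > (b)) - ((a) < (b)))

struct gc_pos { int phase, level; long pos; };

static int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
	return cmp_int(l.phase, r.phase) ?:
	       -cmp_int(l.level, r.level) ?:	/* descending level */
	       cmp_int(l.pos, r.pos);
}

int main(void)
{
	struct gc_pos a = { 1, 2, 10 }, b = { 1, 0, 5 };

	printf("%d\n", gc_pos_cmp(a, b));	/* -1: level 2 is visited first */
	return 0;
}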
static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
@@ -60,13 +57,13 @@ static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
}
}
-static inline struct gc_pos gc_pos_btree(enum btree_id id,
- struct bpos pos, unsigned level)
+static inline struct gc_pos gc_pos_btree(enum btree_id btree, unsigned level,
+ struct bpos pos)
{
return (struct gc_pos) {
- .phase = btree_id_to_gc_phase(id),
- .pos = pos,
+ .phase = btree_id_to_gc_phase(btree),
.level = level,
+ .pos = pos,
};
}
@@ -76,19 +73,7 @@ static inline struct gc_pos gc_pos_btree(enum btree_id id,
*/
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
{
- return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
-}
-
-/*
- * GC position of the pointer to a btree root: we don't use
- * gc_pos_pointer_to_btree_node() here to avoid a potential race with
- * btree_split() increasing the tree depth - the new root will have level > the
- * old root and thus have a greater gc position than the old root, but that
- * would be incorrect since once gc has marked the root it's not coming back.
- */
-static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
-{
- return gc_pos_btree(id, SPOS_MAX, BTREE_MAX_DEPTH);
+ return gc_pos_btree(b->c.btree_id, b->c.level, b->key.k.p);
}
static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
@@ -104,11 +89,8 @@ static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
return ret;
}
-static inline void bch2_do_gc_gens(struct bch_fs *c)
-{
- atomic_inc(&c->kick_gc);
- if (c->gc_thread)
- wake_up_process(c->gc_thread);
-}
+int bch2_gc_gens(struct bch_fs *);
+void bch2_gc_gens_async(struct bch_fs *);
+void bch2_fs_gc_init(struct bch_fs *);
#endif /* _BCACHEFS_BTREE_GC_H */
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 34df8ccc5fec..cbf8f5d90602 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -23,6 +23,18 @@
#include <linux/sched/mm.h>
+static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
+{
+ prt_printf(out, "btree=%s l=%u seq %llux\n",
+ bch2_btree_id_str(BTREE_NODE_ID(bn)),
+ (unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq);
+ prt_str(out, "min: ");
+ bch2_bpos_to_text(out, bn->min_key);
+ prt_newline(out);
+ prt_str(out, "max: ");
+ bch2_bpos_to_text(out, bn->max_key);
+}
+
void bch2_btree_node_io_unlock(struct btree *b)
{
EBUG_ON(!btree_node_write_in_flight(b));
@@ -217,7 +229,6 @@ static bool should_compact_bset(struct btree *b, struct bset_tree *t,
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
- struct bset_tree *t;
bool ret = false;
for_each_bset(b, t) {
@@ -288,8 +299,7 @@ bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
static void btree_node_sort(struct bch_fs *c, struct btree *b,
unsigned start_idx,
- unsigned end_idx,
- bool filter_whiteouts)
+ unsigned end_idx)
{
struct btree_node *out;
struct sort_iter_stack sort_iter;
@@ -320,7 +330,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
start_time = local_clock();
- u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter, filter_whiteouts);
+ u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);
out->keys.u64s = cpu_to_le16(u64s);
@@ -426,13 +436,12 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b)
break;
if (b->nsets - unwritten_idx > 1) {
- btree_node_sort(c, b, unwritten_idx,
- b->nsets, false);
+ btree_node_sort(c, b, unwritten_idx, b->nsets);
ret = true;
}
if (unwritten_idx > 1) {
- btree_node_sort(c, b, 0, unwritten_idx, false);
+ btree_node_sort(c, b, 0, unwritten_idx);
ret = true;
}
@@ -441,8 +450,6 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b)
void bch2_btree_build_aux_trees(struct btree *b)
{
- struct bset_tree *t;
-
for_each_bset(b, t)
bch2_bset_build_aux_tree(b, t,
!bset_written(b, bset(b, t)) &&
@@ -524,7 +531,9 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
prt_printf(out, "at btree ");
bch2_btree_pos_to_text(out, c, b);
- prt_printf(out, "\n node offset %u/%u",
+ printbuf_indent_add(out, 2);
+
+ prt_printf(out, "\nnode offset %u/%u",
b->written, btree_ptr_sectors_written(&b->key));
if (i)
prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
@@ -543,6 +552,7 @@ static int __btree_err(int ret,
const char *fmt, ...)
{
struct printbuf out = PRINTBUF;
+ bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
va_list args;
btree_err_msg(&out, c, ca, b, i, b->written, write);
@@ -564,12 +574,14 @@ static int __btree_err(int ret,
if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
ret = -BCH_ERR_btree_node_read_err_bad_node;
- if (ret != -BCH_ERR_btree_node_read_err_fixable)
+ if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
bch2_sb_error_count(c, err_type);
switch (ret) {
case -BCH_ERR_btree_node_read_err_fixable:
- ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
+ ret = !silent
+ ? bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf)
+ : -BCH_ERR_fsck_fix;
if (ret != -BCH_ERR_fsck_fix &&
ret != -BCH_ERR_fsck_ignore)
goto fsck_err;
@@ -577,14 +589,17 @@ static int __btree_err(int ret,
break;
case -BCH_ERR_btree_node_read_err_want_retry:
case -BCH_ERR_btree_node_read_err_must_retry:
- bch2_print_string_as_lines(KERN_ERR, out.buf);
+ if (!silent)
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
break;
case -BCH_ERR_btree_node_read_err_bad_node:
- bch2_print_string_as_lines(KERN_ERR, out.buf);
+ if (!silent)
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
ret = bch2_topology_error(c);
break;
case -BCH_ERR_btree_node_read_err_incompatible:
- bch2_print_string_as_lines(KERN_ERR, out.buf);
+ if (!silent)
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
ret = -BCH_ERR_fsck_errors_not_fixed;
break;
default:
@@ -619,8 +634,6 @@ fsck_err:
__cold
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
- struct bset_tree *t;
-
for_each_bset(b, t) {
struct bset *i = bset(b, t);
struct bkey_packed *k;
@@ -654,6 +667,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
*/
bch2_bset_set_no_aux_tree(b, b->set);
bch2_btree_build_aux_trees(b);
+ b->nr = bch2_btree_node_count_keys(b);
struct bkey_s_c k;
struct bkey unpacked;
@@ -830,7 +844,7 @@ static int bset_key_invalid(struct bch_fs *c, struct btree *b,
(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}
-static bool __bkey_valid(struct bch_fs *c, struct btree *b,
+static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
struct bset *i, struct bkey_packed *k)
{
if (bkey_p_next(k) > vstruct_last(i))
@@ -839,7 +853,7 @@ static bool __bkey_valid(struct bch_fs *c, struct btree *b,
if (k->format > KEY_FORMAT_CURRENT)
return false;
- if (k->u64s < bkeyp_key_u64s(&b->format, k))
+ if (!bkeyp_u64s_valid(&b->format, k))
return false;
struct printbuf buf = PRINTBUF;
@@ -883,11 +897,13 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
"invalid bkey format %u", k->format))
goto drop_this_key;
- if (btree_err_on(k->u64s < bkeyp_key_u64s(&b->format, k),
+ if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
-BCH_ERR_btree_node_read_err_fixable,
c, NULL, b, i,
btree_node_bkey_bad_u64s,
- "k->u64s too small (%u < %u)", k->u64s, bkeyp_key_u64s(&b->format, k)))
+ "bad k->u64s %u (min %u max %zu)", k->u64s,
+ bkeyp_key_u64s(&b->format, k),
+ U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
goto drop_this_key;
if (!write)
@@ -946,13 +962,12 @@ drop_this_key:
* do
*/
- if (!__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
+ if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
for (next_good_key = 1;
next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
next_good_key++)
- if (__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
+ if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
goto got_good_key;
-
}
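
When a bkey fails validation, validate_bset_keys() does not throw away the rest of the bset: it resynchronizes by advancing one u64 at a time until bkey_packed_valid() accepts a candidate key. A toy model of that forward scan — the validity rule here is invented, standing in for the real packed-key checks:

#include <stdio.h>
#include <stdint.h>

/* A "key" is valid here if its first u64 is a plausible length (1..4)
 * that fits in the remaining buffer. */
static int valid_at(const uint64_t *buf, unsigned pos, unsigned len)
{
	return buf[pos] >= 1 && buf[pos] <= 4 && pos + buf[pos] <= len;
}

int main(void)
{
	uint64_t bset[] = { 2, 7, 99, 0, 3, 8, 9 };	/* 99, 0 = corruption */
	unsigned len = 7, pos = 0;

	while (pos < len) {
		if (!valid_at(bset, pos, len)) {
			unsigned skip = 1;

			while (pos + skip < len && !valid_at(bset, pos + skip, len))
				skip++;
			printf("dropped %u u64s at %u\n", skip, pos);
			pos += skip;
			continue;
		}
		printf("key at %u, len %llu\n", pos, (unsigned long long) bset[pos]);
		pos += (unsigned) bset[pos];
	}
	return 0;
}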
/*
@@ -1019,18 +1034,19 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
-BCH_ERR_btree_node_read_err_must_retry,
c, ca, b, NULL,
btree_node_bad_seq,
- "got wrong btree node (want %llx got %llx)\n"
- "got btree %s level %llu pos %s",
- bp->seq, b->data->keys.seq,
- bch2_btree_id_str(BTREE_NODE_ID(b->data)),
- BTREE_NODE_LEVEL(b->data),
- buf.buf);
+ "got wrong btree node: got\n%s",
+ (printbuf_reset(&buf),
+ bch2_btree_node_header_to_text(&buf, b->data),
+ buf.buf));
} else {
btree_err_on(!b->data->keys.seq,
-BCH_ERR_btree_node_read_err_must_retry,
c, ca, b, NULL,
btree_node_bad_seq,
- "bad btree header: seq 0");
+ "bad btree header: seq 0\n%s",
+ (printbuf_reset(&buf),
+ bch2_btree_node_header_to_text(&buf, b->data),
+ buf.buf));
}
while (b->written < (ptr_written ?: btree_sectors(c))) {
@@ -1093,7 +1109,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
nonce = btree_nonce(i, b->written << 9);
struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
csum_bad = bch2_crc_cmp(bne->csum, csum);
- if (csum_bad)
+ if (ca && csum_bad)
bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
btree_err_on(csum_bad,
@@ -1247,12 +1263,14 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_node_reset_sib_u64s(b);
+ rcu_read_lock();
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
- struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
+ struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);
- if (ca2->mi.state != BCH_MEMBER_STATE_rw)
+ if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
set_btree_node_need_rewrite(b);
}
+ rcu_read_unlock();
if (!ptr_written)
set_btree_node_need_rewrite(b);
@@ -1263,10 +1281,12 @@ out:
return retry_read;
fsck_err:
if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
- ret == -BCH_ERR_btree_node_read_err_must_retry)
+ ret == -BCH_ERR_btree_node_read_err_must_retry) {
retry_read = 1;
- else
+ } else {
set_btree_node_read_error(b);
+ bch2_btree_lost_data(c, b->c.btree_id);
+ }
goto out;
}
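
bch2_btree_lost_data(), called from every unrecoverable-read path in this series, records which btree hit unreadable nodes so recovery can schedule the appropriate repair passes. The natural representation is a per-btree bitmask; a sketch along those lines — field and helper names are guesses, not the kernel API:

#include <stdio.h>
#include <stdint.h>

enum btree_id { BTREE_ID_extents, BTREE_ID_inodes, BTREE_ID_alloc, BTREE_ID_NR };

static uint64_t btrees_lost_data;	/* one bit per btree */

static void btree_lost_data(enum btree_id btree)
{
	btrees_lost_data |= 1ULL << btree;
}

int main(void)
{
	btree_lost_data(BTREE_ID_inodes);
	printf("lost mask %#llx\n", (unsigned long long) btrees_lost_data);
	return 0;
}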
@@ -1275,8 +1295,8 @@ static void btree_node_read_work(struct work_struct *work)
struct btree_read_bio *rb =
container_of(work, struct btree_read_bio, work);
struct bch_fs *c = rb->c;
+ struct bch_dev *ca = rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
struct btree *b = rb->b;
- struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
struct bio *bio = &rb->bio;
struct bch_io_failures failed = { .nr = 0 };
struct printbuf buf = PRINTBUF;
@@ -1288,8 +1308,8 @@ static void btree_node_read_work(struct work_struct *work)
while (1) {
retry = true;
bch_info(c, "retrying read");
- ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
- rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
+ rb->have_ioref = ca != NULL;
bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = rb->pick.ptr.offset;
bio->bi_iter.bi_size = btree_buf_bytes(b);
@@ -1303,7 +1323,7 @@ static void btree_node_read_work(struct work_struct *work)
start:
printbuf_reset(&buf);
bch2_btree_pos_to_text(&buf, c, b);
- bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
+ bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
"btree read error %s for %s",
bch2_blk_status_to_str(bio->bi_status), buf.buf);
if (rb->have_ioref)
@@ -1327,6 +1347,7 @@ start:
if (!can_retry) {
set_btree_node_read_error(b);
+ bch2_btree_lost_data(c, b->c.btree_id);
break;
}
}
@@ -1335,7 +1356,9 @@ start:
rb->start_time);
bio_put(&rb->bio);
- if (saw_error && !btree_node_read_error(b)) {
+ if (saw_error &&
+ !btree_node_read_error(b) &&
+ c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->key.k.p);
bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
@@ -1356,7 +1379,7 @@ static void btree_node_read_endio(struct bio *bio)
struct bch_fs *c = rb->c;
if (rb->have_ioref) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+ struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
bch2_latency_acct(ca, rb->start_time, READ);
}
@@ -1526,9 +1549,10 @@ fsck_err:
ret = -1;
}
- if (ret)
+ if (ret) {
set_btree_node_read_error(b);
- else if (*saw_error)
+ bch2_btree_lost_data(c, b->c.btree_id);
+ } else if (*saw_error)
bch2_btree_node_rewrite_async(c, b);
for (i = 0; i < ra->nr; i++) {
@@ -1552,7 +1576,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
struct btree_node_read_all *ra = rb->ra;
if (rb->have_ioref) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
+ struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
bch2_latency_acct(ca, rb->start_time, READ);
}
@@ -1594,14 +1618,14 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
i = 0;
bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
struct btree_read_bio *rb =
container_of(ra->bio[i], struct btree_read_bio, bio);
rb->c = c;
rb->b = b;
rb->ra = ra;
rb->start_time = local_clock();
- rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ rb->have_ioref = ca != NULL;
rb->idx = i;
rb->pick = pick;
rb->bio.bi_iter.bi_sector = pick.ptr.offset;
@@ -1657,20 +1681,21 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
prt_str(&buf, "btree node read error: no device to read from\n at ");
bch2_btree_pos_to_text(&buf, c, b);
- bch_err(c, "%s", buf.buf);
+ bch_err_ratelimited(c, "%s", buf.buf);
if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
bch2_fatal_error(c);
set_btree_node_read_error(b);
+ bch2_btree_lost_data(c, b->c.btree_id);
clear_btree_node_read_in_flight(b);
wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
printbuf_exit(&buf);
return;
}
- ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
bio = bio_alloc_bioset(NULL,
buf_pages(b->data, btree_buf_bytes(b)),
@@ -1682,7 +1707,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
rb->b = b;
rb->ra = NULL;
rb->start_time = local_clock();
- rb->have_ioref = bch2_dev_get_ioref(ca, READ);
+ rb->have_ioref = ca != NULL;
rb->pick = pick;
INIT_WORK(&rb->work, btree_node_read_work);
bio->bi_iter.bi_sector = pick.ptr.offset;
@@ -1837,7 +1862,6 @@ static void btree_node_write_work(struct work_struct *work)
container_of(work, struct btree_write_bio, work);
struct bch_fs *c = wbio->wbio.c;
struct btree *b = wbio->wbio.bio.bi_private;
- struct bch_extent_ptr *ptr;
int ret = 0;
btree_bounce_free(c,
@@ -1860,7 +1884,7 @@ static void btree_node_write_work(struct work_struct *work)
} else {
ret = bch2_trans_do(c, NULL, NULL, 0,
bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
- BCH_WATERMARK_reclaim|
+ BCH_WATERMARK_interior_updates|
BCH_TRANS_COMMIT_journal_reclaim|
BCH_TRANS_COMMIT_no_enospc|
BCH_TRANS_COMMIT_no_check_rw,
@@ -1887,13 +1911,14 @@ static void btree_node_write_endio(struct bio *bio)
struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
struct bch_fs *c = wbio->c;
struct btree *b = wbio->bio.bi_private;
- struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
+ struct bch_dev *ca = wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
unsigned long flags;
if (wbio->have_ioref)
bch2_latency_acct(ca, wbio->submit_time, WRITE);
- if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
+ if (!ca ||
+ bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
"btree write error: %s",
bch2_blk_status_to_str(bio->bi_status)) ||
bch2_meta_write_fault("btree")) {
@@ -1960,7 +1985,6 @@ static void btree_write_submit(struct work_struct *work)
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
struct btree_write_bio *wbio;
- struct bset_tree *t;
struct bset *i;
struct btree_node *bn = NULL;
struct btree_node_entry *bne = NULL;
@@ -2086,11 +2110,11 @@ do_write:
unwritten_whiteouts_end(b));
SET_BSET_SEPARATE_WHITEOUTS(i, false);
- b->whiteout_u64s = 0;
-
- u64s = bch2_sort_keys(i->start, &sort_iter.iter, false);
+ u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
le16_add_cpu(&i->u64s, u64s);
+ b->whiteout_u64s = 0;
+
BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
set_needs_whiteout(i, false);
@@ -2217,7 +2241,6 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
bool invalidated_iter = false;
struct btree_node_entry *bne;
- struct bset_tree *t;
if (!btree_node_just_written(b))
return false;
@@ -2240,7 +2263,7 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
* single bset:
*/
if (b->nsets > 1) {
- btree_node_sort(c, b, 0, b->nsets, true);
+ btree_node_sort(c, b, 0, b->nsets);
invalidated_iter = true;
} else {
invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
@@ -2337,20 +2360,13 @@ void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
printbuf_tabstop_push(out, 20);
printbuf_tabstop_push(out, 10);
- prt_tab(out);
- prt_str(out, "nr");
- prt_tab(out);
- prt_str(out, "size");
- prt_newline(out);
+ prt_printf(out, "\tnr\tsize\n");
for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
- prt_printf(out, "%s:", bch2_btree_write_types[i]);
- prt_tab(out);
- prt_u64(out, nr);
- prt_tab(out);
+ prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
prt_newline(out);
}
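
These printbuf conversions work because '\t' in prt_printf() means "advance to the next tabstop pushed with printbuf_tabstop_push()", not a literal tab — which is what lets the prt_tab()/prt_u64() sequences collapse into single format strings. A minimal user-space model of that behaviour (real printbufs do considerably more):

#include <stdio.h>

static void tab_print(const char *s, const int *stops)
{
	int col = 0, t = 0;

	for (; *s; s++) {
		if (*s == '\t') {
			while (col < stops[t])
				putchar(' '), col++;
			t++;
		} else {
			putchar(*s);
			if (*s == '\n')
				col = t = 0;
			else
				col++;
		}
	}
}

int main(void)
{
	int stops[] = { 20, 30 };	/* as pushed above: 20, then 10 more */

	tab_print("\tnr\tsize\n", stops);
	tab_print("initial:\t42\t4.0k\n", stops);
	return 0;
}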
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index e251cb6b965f..2b8b564fc560 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -81,8 +81,6 @@ static inline bool should_compact_bset_lazy(struct btree *b,
static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
{
- struct bset_tree *t;
-
for_each_bset(b, t)
if (should_compact_bset_lazy(b, t))
return bch2_compact_whiteouts(c, b, COMPACT_LAZY);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 51bcdc6c6d1c..5bf98cb8b15d 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -61,7 +61,7 @@ static inline int btree_path_cmp(const struct btree_path *l,
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
/* Are we iterating over keys in all snapshots? */
- if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+ if (iter->flags & BTREE_ITER_all_snapshots) {
p = bpos_successor(p);
} else {
p = bpos_nosnap_successor(p);
@@ -74,7 +74,7 @@ static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
/* Are we iterating over keys in all snapshots? */
- if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+ if (iter->flags & BTREE_ITER_all_snapshots) {
p = bpos_predecessor(p);
} else {
p = bpos_nosnap_predecessor(p);
@@ -88,7 +88,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
struct bpos pos = iter->pos;
- if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ if ((iter->flags & BTREE_ITER_is_extents) &&
!bkey_eq(pos, POS_MAX))
pos = bkey_successor(iter, pos);
return pos;
@@ -253,13 +253,13 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
BUG_ON(iter->btree_id >= BTREE_ID_NR);
- BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != btree_iter_path(trans, iter)->cached);
+ BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
- BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
- (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
+ BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
+ (iter->flags & BTREE_ITER_all_snapshots));
- BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
- (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
+ (iter->flags & BTREE_ITER_all_snapshots) &&
!btree_type_has_snapshot_field(iter->btree_id));
if (iter->update_path)
@@ -269,10 +269,10 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
- BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
!iter->pos.snapshot);
- BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
iter->pos.snapshot != iter->snapshot);
BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
@@ -289,7 +289,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
if (!bch2_debug_check_iterators)
return 0;
- if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
+ if (!(iter->flags & BTREE_ITER_filter_snapshots))
return 0;
if (bkey_err(k) || !k.k)
@@ -300,8 +300,8 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
k.k->p.snapshot));
bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
- BTREE_ITER_NOPRESERVE|
- BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_nopreserve|
+ BTREE_ITER_all_snapshots);
prev = bch2_btree_iter_prev(&copy);
if (!prev.k)
goto out;
@@ -897,7 +897,7 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
bch2_bkey_buf_reassemble(out, c, k);
- if ((flags & BTREE_ITER_PREFETCH) &&
+ if ((flags & BTREE_ITER_prefetch) &&
c->opts.btree_node_prefetch)
ret = btree_path_prefetch_j(trans, path, &jiter);
@@ -927,10 +927,24 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
if (ret)
goto err;
} else {
- bch2_bkey_buf_unpack(&tmp, c, l->b,
- bch2_btree_node_iter_peek(&l->iter, l->b));
+ struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
+ if (!k) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "node not found at pos ");
+ bch2_bpos_to_text(&buf, path->pos);
+ prt_str(&buf, " within parent node ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));
+
+ bch2_fs_fatal_error(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ ret = -BCH_ERR_btree_need_topology_repair;
+ goto err;
+ }
+
+ bch2_bkey_buf_unpack(&tmp, c, l->b, k);
- if ((flags & BTREE_ITER_PREFETCH) &&
+ if ((flags & BTREE_ITER_prefetch) &&
c->opts.btree_node_prefetch) {
ret = btree_path_prefetch(trans, path);
if (ret)
@@ -962,7 +976,6 @@ err:
return ret;
}
-
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
@@ -986,6 +999,7 @@ retry_all:
bch2_trans_unlock(trans);
cond_resched();
+ trans->locked = true;
if (unlikely(trans->memory_allocation_failure)) {
struct closure cl;
@@ -1149,6 +1163,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
goto out_uptodate;
path->level = btree_path_up_until_good_node(trans, path, 0);
+ unsigned max_level = path->level;
EBUG_ON(btree_path_node(path, path->level) &&
!btree_node_locked(path, path->level));
@@ -1179,6 +1194,16 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
goto out;
}
}
+
+ if (unlikely(max_level > path->level)) {
+ struct btree_path *linked;
+ unsigned iter;
+
+ trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
+ for (unsigned j = path->level + 1; j < max_level; j++)
+ linked->l[j] = path->l[j];
+ }
+
out_uptodate:
path->uptodate = BTREE_ITER_UPTODATE;
out:
@@ -1208,11 +1233,14 @@ static inline void btree_path_copy(struct btree_trans *trans, struct btree_path
}
static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
- bool intent)
+ bool intent, unsigned long ip)
{
btree_path_idx_t new = btree_path_alloc(trans, src);
btree_path_copy(trans, trans->paths + new, trans->paths + src);
__btree_path_get(trans->paths + new, intent);
+#ifdef TRACK_PATH_ALLOCATED
+ trans->paths[new].ip_allocated = ip;
+#endif
return new;
}
@@ -1221,7 +1249,7 @@ btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
btree_path_idx_t path, bool intent, unsigned long ip)
{
__btree_path_put(trans->paths + path, intent);
- path = btree_path_clone(trans, path, intent);
+ path = btree_path_clone(trans, path, intent, ip);
trans->paths[path].preserve = false;
return path;
}
@@ -1321,6 +1349,26 @@ static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t
__clear_bit(path, trans->paths_allocated);
}
+static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
+{
+ unsigned l = path->level;
+
+ do {
+ if (!btree_path_node(path, l))
+ break;
+
+ if (!is_btree_node(path, l))
+ return false;
+
+ if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
+ return false;
+
+ l++;
+ } while (l < path->locks_want);
+
+ return true;
+}
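
bch2_btree_path_can_relock() predicts whether a duplicate path could be relocked without actually taking any locks, by comparing the lock sequence number each level remembers against the node's current one. A toy model of that seq check, with invented types (the real code also stops at locks_want and checks is_btree_node()):

#include <stdio.h>
#include <stdbool.h>

struct node { unsigned lock_seq; };
struct path_level { struct node *node; unsigned lock_seq; };

static bool can_relock(struct path_level *l, unsigned nr)
{
	for (unsigned i = 0; i < nr; i++)
		if (l[i].node && l[i].lock_seq != l[i].node->lock_seq)
			return false;	/* node changed since we last saw it */
	return true;
}

int main(void)
{
	struct node a = { 4 }, b = { 9 };
	struct path_level path[] = { { &a, 4 }, { &b, 9 } };

	printf("%d\n", can_relock(path, 2));	/* 1 */
	b.lock_seq++;				/* a writer touched node b */
	printf("%d\n", can_relock(path, 2));	/* 0 */
	return 0;
}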
+
void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
struct btree_path *path = trans->paths + path_idx, *dup;
@@ -1335,10 +1383,15 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
return;
- if (path->should_be_locked &&
- !trans->restarted &&
- (!dup || !bch2_btree_path_relock_norestart(trans, dup)))
- return;
+ if (path->should_be_locked && !trans->restarted) {
+ if (!dup)
+ return;
+
+ if (!(trans->locked
+ ? bch2_btree_path_relock_norestart(trans, dup)
+ : bch2_btree_path_can_relock(trans, dup)))
+ return;
+ }
if (dup) {
dup->preserve |= path->preserve;
@@ -1371,22 +1424,26 @@ void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
(void *) trans->last_restarted_ip);
}
+void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
+{
+ panic("trans should be locked, unlocked by %pS\n",
+ (void *) trans->last_unlock_ip);
+}
+
noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
- prt_printf(buf, "transaction updates for %s journal seq %llu",
+ prt_printf(buf, "transaction updates for %s journal seq %llu\n",
trans->fn, trans->journal_res.seq);
- prt_newline(buf);
printbuf_indent_add(buf, 2);
trans_for_each_update(trans, i) {
struct bkey_s_c old = { &i->old_k, i->old_v };
- prt_printf(buf, "update: btree=%s cached=%u %pS",
+ prt_printf(buf, "update: btree=%s cached=%u %pS\n",
bch2_btree_id_str(i->btree_id),
i->cached,
(void *) i->ip_allocated);
- prt_newline(buf);
prt_printf(buf, " old ");
bch2_bkey_val_to_text(buf, trans->c, old);
@@ -1415,23 +1472,63 @@ void bch2_dump_trans_updates(struct btree_trans *trans)
printbuf_exit(&buf);
}
-static void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
+static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
struct btree_path *path = trans->paths + path_idx;
- prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
+ prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
path_idx, path->ref, path->intent_ref,
path->preserve ? 'P' : ' ',
path->should_be_locked ? 'S' : ' ',
+ path->cached ? 'C' : 'B',
bch2_btree_id_str(path->btree_id),
path->level);
bch2_bpos_to_text(out, path->pos);
- prt_printf(out, " locks %u", path->nodes_locked);
#ifdef TRACK_PATH_ALLOCATED
prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
+}
+
+static const char *btree_node_locked_str(enum btree_node_locked_type t)
+{
+ switch (t) {
+ case BTREE_NODE_UNLOCKED:
+ return "unlocked";
+ case BTREE_NODE_READ_LOCKED:
+ return "read";
+ case BTREE_NODE_INTENT_LOCKED:
+ return "intent";
+ case BTREE_NODE_WRITE_LOCKED:
+ return "write";
+ default:
+ return NULL;
+ }
+}
+
+void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
+{
+ bch2_btree_path_to_text_short(out, trans, path_idx);
+
+ struct btree_path *path = trans->paths + path_idx;
+
+ prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
prt_newline(out);
+
+ printbuf_indent_add(out, 2);
+ for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
+ prt_printf(out, "l=%u locks %s seq %u node ", l,
+ btree_node_locked_str(btree_node_locked_type(path, l)),
+ path->l[l].lock_seq);
+
+ int ret = PTR_ERR_OR_ZERO(path->l[l].b);
+ if (ret)
+ prt_str(out, bch2_err_str(ret));
+ else
+ prt_printf(out, "%px", path->l[l].b);
+ prt_newline(out);
+ }
+ printbuf_indent_sub(out, 2);
}
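
path->l[l].b holds either a btree node pointer or an errno encoded in the pointer value, which is why the dump above runs it through PTR_ERR_OR_ZERO() before printing. A stand-alone sketch of the kernel's ERR_PTR encoding convention that makes this work:

#include <stdio.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long err)	   { return (void *) err; }
static long  PTR_ERR(const void *p) { return (long) p; }
static int   IS_ERR(const void *p)
{
	/* errnos live in the top 4095 values of the address space */
	return (unsigned long) p >= (unsigned long) -MAX_ERRNO;
}
static long  PTR_ERR_OR_ZERO(const void *p) { return IS_ERR(p) ? PTR_ERR(p) : 0; }

int main(void)
{
	int node = 42;
	void *ok = &node, *bad = ERR_PTR(-12);	/* -ENOMEM */

	printf("%ld %ld\n", PTR_ERR_OR_ZERO(ok), PTR_ERR_OR_ZERO(bad));	/* 0 -12 */
	return 0;
}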
static noinline __cold
@@ -1443,8 +1540,10 @@ void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
if (!nosort)
btree_trans_sort_paths(trans);
- trans_for_each_path_idx_inorder(trans, iter)
- bch2_btree_path_to_text(out, trans, iter.path_idx);
+ trans_for_each_path_idx_inorder(trans, iter) {
+ bch2_btree_path_to_text_short(out, trans, iter.path_idx);
+ prt_newline(out);
+ }
}
noinline __cold
@@ -1595,11 +1694,12 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
unsigned flags, unsigned long ip)
{
struct btree_path *path;
- bool cached = flags & BTREE_ITER_CACHED;
- bool intent = flags & BTREE_ITER_INTENT;
+ bool cached = flags & BTREE_ITER_cached;
+ bool intent = flags & BTREE_ITER_intent;
struct trans_for_each_path_inorder_iter iter;
btree_path_idx_t path_pos = 0, path_idx;
+ bch2_trans_verify_not_unlocked(trans);
bch2_trans_verify_not_in_restart(trans);
bch2_trans_verify_locks(trans);
@@ -1644,7 +1744,7 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
trans->paths_sorted = false;
}
- if (!(flags & BTREE_ITER_NOPRESERVE))
+ if (!(flags & BTREE_ITER_nopreserve))
path->preserve = true;
if (path->intent_ref)
@@ -1665,6 +1765,22 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
return path_idx;
}
+btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
+ enum btree_id btree_id,
+ unsigned level,
+ struct bpos pos)
+{
+ btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
+ BTREE_ITER_nopreserve|
+ BTREE_ITER_intent, _RET_IP_);
+ path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
+
+ struct btree_path *path = trans->paths + path_idx;
+ bch2_btree_path_downgrade(trans, path);
+ __bch2_btree_path_unlock(trans, path);
+ return path_idx;
+}
+
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{
@@ -1706,6 +1822,19 @@ hole:
return (struct bkey_s_c) { u, NULL };
}
+
+void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+
+ if (!iter->path || trans->restarted)
+ return;
+
+ struct btree_path *path = btree_iter_path(trans, iter);
+ path->preserve = false;
+ if (path->ref == 1)
+ path->should_be_locked = false;
+}
/* Btree iterators: */
int __must_check
@@ -1720,9 +1849,11 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
struct btree_trans *trans = iter->trans;
int ret;
+ bch2_trans_verify_not_unlocked(trans);
+
iter->path = bch2_btree_path_set_pos(trans, iter->path,
btree_iter_search_key(iter),
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
@@ -1761,7 +1892,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
iter->k.p = iter->pos = b->key.k.p;
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out:
@@ -1822,13 +1953,16 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
if (bpos_eq(iter->pos, b->key.k.p)) {
__btree_path_set_level_up(trans, path, path->level++);
} else {
+ if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
+ btree_node_unlock(trans, path, path->level + 1);
+
/*
* Haven't gotten to the end of the parent node: go back down to
* the next child node
*/
iter->path = bch2_btree_path_set_pos(trans, iter->path,
bpos_successor(iter->pos),
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
path = btree_iter_path(trans, iter);
@@ -1846,7 +1980,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
iter->k.p = iter->pos = b->key.k.p;
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
@@ -1865,11 +1999,11 @@ err:
inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
struct bpos pos = iter->k.p;
- bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ bool ret = !(iter->flags & BTREE_ITER_all_snapshots
? bpos_eq(pos, SPOS_MAX)
: bkey_eq(pos, SPOS_MAX));
- if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+ if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_successor(iter, pos);
bch2_btree_iter_set_pos(iter, pos);
return ret;
@@ -1878,11 +2012,11 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
- bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+ bool ret = !(iter->flags & BTREE_ITER_all_snapshots
? bpos_eq(pos, POS_MIN)
: bkey_eq(pos, POS_MIN));
- if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+ if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_predecessor(iter, pos);
bch2_btree_iter_set_pos(iter, pos);
return ret;
@@ -1993,7 +2127,10 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
struct bkey_s_c k;
int ret;
- if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
+ bch2_trans_verify_not_in_restart(trans);
+ bch2_trans_verify_not_unlocked(trans);
+
+ if ((iter->flags & BTREE_ITER_key_cache_fill) &&
bpos_eq(iter->pos, pos))
return bkey_s_c_null;
@@ -2002,17 +2139,17 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
if (!iter->key_cache_path)
iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
- iter->flags & BTREE_ITER_INTENT, 0,
- iter->flags|BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL,
+ iter->flags & BTREE_ITER_intent, 0,
+ iter->flags|BTREE_ITER_cached|
+ BTREE_ITER_cached_nofill,
_THIS_IP_);
iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
- iter->flags|BTREE_ITER_CACHED) ?:
+ iter->flags|BTREE_ITER_cached) ?:
bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
if (unlikely(ret))
return bkey_s_c_err(ret);
@@ -2040,7 +2177,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
struct btree_path_level *l;
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@@ -2065,7 +2202,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
k = btree_path_level_peek_all(trans->c, l, &iter->k);
- if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+ if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
(k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
k = k2;
@@ -2076,10 +2213,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
}
}
- if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
+ if (unlikely(iter->flags & BTREE_ITER_with_journal))
k = btree_trans_peek_journal(trans, iter, k);
- if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+ if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
trans->nr_updates))
bch2_btree_trans_peek_updates(trans, iter, &k);
@@ -2131,11 +2268,12 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
struct bpos iter_pos;
int ret;
- EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
+ bch2_trans_verify_not_unlocked(trans);
+ EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));
if (iter->update_path) {
bch2_path_put_nokeep(trans, iter->update_path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent);
iter->update_path = 0;
}
@@ -2158,7 +2296,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* isn't monotonically increasing before FILTER_SNAPSHOTS, and
* that's what we check against in extents mode:
*/
- if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
? bkey_gt(k.k->p, end)
: k.k->p.inode > end.inode))
goto end;
@@ -2166,13 +2304,13 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
if (iter->update_path &&
!bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
bch2_path_put_nokeep(trans, iter->update_path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent);
iter->update_path = 0;
}
- if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
- (iter->flags & BTREE_ITER_INTENT) &&
- !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ if ((iter->flags & BTREE_ITER_filter_snapshots) &&
+ (iter->flags & BTREE_ITER_intent) &&
+ !(iter->flags & BTREE_ITER_is_extents) &&
!iter->update_path) {
struct bpos pos = k.k->p;
@@ -2187,12 +2325,12 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* advance, same as on exit for iter->path, but only up
* to snapshot
*/
- __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_INTENT);
+ __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
iter->update_path = iter->path;
iter->update_path = bch2_btree_path_set_pos(trans,
iter->update_path, pos,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
_THIS_IP_);
ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
if (unlikely(ret)) {
@@ -2205,7 +2343,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* We can never have a key in a leaf node at POS_MAX, so
* we don't have to check these successor() calls:
*/
- if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
+ if ((iter->flags & BTREE_ITER_filter_snapshots) &&
!bch2_snapshot_is_ancestor(trans->c,
iter->snapshot,
k.k->p.snapshot)) {
@@ -2214,7 +2352,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
}
if (bkey_whiteout(k.k) &&
- !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+ !(iter->flags & BTREE_ITER_all_snapshots)) {
search_key = bkey_successor(iter, k.k->p);
continue;
}
@@ -2224,12 +2362,12 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* equal to the key we just returned - except extents can
* straddle iter->pos:
*/
- if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+ if (!(iter->flags & BTREE_ITER_is_extents))
iter_pos = k.k->p;
else
iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
- if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
? bkey_gt(iter_pos, end)
: bkey_ge(iter_pos, end)))
goto end;
@@ -2240,7 +2378,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
iter->pos = iter_pos;
iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
@@ -2253,7 +2391,7 @@ out_no_locked:
btree_path_set_should_be_locked(trans->paths + iter->update_path);
}
- if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+ if (!(iter->flags & BTREE_ITER_all_snapshots))
iter->pos.snapshot = iter->snapshot;
ret = bch2_btree_iter_verify_ret(iter, k);
@@ -2303,21 +2441,22 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
btree_path_idx_t saved_path = 0;
int ret;
+ bch2_trans_verify_not_unlocked(trans);
EBUG_ON(btree_iter_path(trans, iter)->cached ||
btree_iter_path(trans, iter)->level);
- if (iter->flags & BTREE_ITER_WITH_JOURNAL)
+ if (iter->flags & BTREE_ITER_with_journal)
return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
- if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ if (iter->flags & BTREE_ITER_filter_snapshots)
search_key.snapshot = U32_MAX;
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@@ -2332,17 +2471,17 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
if (!k.k ||
- ((iter->flags & BTREE_ITER_IS_EXTENTS)
+ ((iter->flags & BTREE_ITER_is_extents)
? bpos_ge(bkey_start_pos(k.k), search_key)
: bpos_gt(k.k->p, search_key)))
k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
- if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+ if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
trans->nr_updates))
bch2_btree_trans_peek_prev_updates(trans, iter, &k);
if (likely(k.k)) {
- if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
+ if (iter->flags & BTREE_ITER_filter_snapshots) {
if (k.k->p.snapshot == iter->snapshot)
goto got_key;
@@ -2353,7 +2492,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
*/
if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
bch2_path_put_nokeep(trans, iter->path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent);
iter->path = saved_path;
saved_path = 0;
iter->k = saved_k;
@@ -2366,9 +2505,10 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
k.k->p.snapshot)) {
if (saved_path)
bch2_path_put_nokeep(trans, saved_path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent);
saved_path = btree_path_clone(trans, iter->path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent,
+ _THIS_IP_);
path = btree_iter_path(trans, iter);
saved_k = *k.k;
saved_v = k.v;
@@ -2379,9 +2519,9 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
}
got_key:
if (bkey_whiteout(k.k) &&
- !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
+ !(iter->flags & BTREE_ITER_all_snapshots)) {
search_key = bkey_predecessor(iter, k.k->p);
- if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ if (iter->flags & BTREE_ITER_filter_snapshots)
search_key.snapshot = U32_MAX;
continue;
}
@@ -2405,11 +2545,11 @@ got_key:
if (bkey_lt(k.k->p, iter->pos))
iter->pos = k.k->p;
- if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
+ if (iter->flags & BTREE_ITER_filter_snapshots)
iter->pos.snapshot = iter->snapshot;
out_no_locked:
if (saved_path)
- bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
+ bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@@ -2439,12 +2579,13 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
struct bkey_s_c k;
int ret;
+ bch2_trans_verify_not_unlocked(trans);
bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
- EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
+ EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
/* extents can't span inode numbers: */
- if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+ if ((iter->flags & BTREE_ITER_is_extents) &&
unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
if (iter->pos.inode == KEY_INODE_MAX)
return bkey_s_c_null;
@@ -2454,7 +2595,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
search_key = btree_iter_search_key(iter);
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@@ -2463,22 +2604,22 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
goto out_no_locked;
}
- if ((iter->flags & BTREE_ITER_CACHED) ||
- !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
+ if ((iter->flags & BTREE_ITER_cached) ||
+ !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
k = bkey_s_c_null;
- if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
+ if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
trans->nr_updates)) {
bch2_btree_trans_peek_slot_updates(trans, iter, &k);
if (k.k)
goto out;
}
- if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
+ if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
(k = btree_trans_peek_slot_journal(trans, iter)).k)
goto out;
- if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
+ if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
(k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
if (!bkey_err(k))
iter->k = *k.k;
@@ -2493,12 +2634,12 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
struct bpos next;
struct bpos end = iter->pos;
- if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ if (iter->flags & BTREE_ITER_is_extents)
end.offset = U64_MAX;
EBUG_ON(btree_iter_path(trans, iter)->level);
- if (iter->flags & BTREE_ITER_INTENT) {
+ if (iter->flags & BTREE_ITER_intent) {
struct btree_iter iter2;
bch2_trans_copy_iter(&iter2, iter);
@@ -2529,7 +2670,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
bkey_init(&iter->k);
iter->k.p = iter->pos;
- if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+ if (iter->flags & BTREE_ITER_is_extents) {
bch2_key_resize(&iter->k,
min_t(u64, KEY_SIZE_MAX,
(next.inode == iter->pos.inode
@@ -2713,13 +2854,13 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
if (iter->update_path)
bch2_path_put_nokeep(trans, iter->update_path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent);
if (iter->path)
bch2_path_put(trans, iter->path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent);
if (iter->key_cache_path)
bch2_path_put(trans, iter->key_cache_path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent);
iter->path = 0;
iter->update_path = 0;
iter->key_cache_path = 0;
@@ -2744,9 +2885,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
unsigned depth,
unsigned flags)
{
- flags |= BTREE_ITER_NOT_EXTENTS;
- flags |= __BTREE_ITER_ALL_SNAPSHOTS;
- flags |= BTREE_ITER_ALL_SNAPSHOTS;
+ flags |= BTREE_ITER_not_extents;
+ flags |= BTREE_ITER_snapshot_field;
+ flags |= BTREE_ITER_all_snapshots;
bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
__bch2_btree_iter_flags(trans, btree_id, flags),
@@ -2769,9 +2910,9 @@ void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
dst->ip_allocated = _RET_IP_;
#endif
if (src->path)
- __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_INTENT);
+ __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
if (src->update_path)
- __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_INTENT);
+ __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
dst->key_cache_path = 0;
}
@@ -2790,6 +2931,31 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
struct btree_transaction_stats *s = btree_trans_stats(trans);
s->max_mem = max(s->max_mem, new_bytes);
+ if (trans->used_mempool) {
+ if (trans->mem_bytes >= new_bytes)
+ goto out_change_top;
+
+ /* No more space from mempool item, need malloc new one */
+ new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
+ if (unlikely(!new_mem)) {
+ bch2_trans_unlock(trans);
+
+ new_mem = kmalloc(new_bytes, GFP_KERNEL);
+ if (!new_mem)
+ return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
+
+ ret = bch2_trans_relock(trans);
+ if (ret) {
+ kfree(new_mem);
+ return ERR_PTR(ret);
+ }
+ }
+ memcpy(new_mem, trans->mem, trans->mem_top);
+ trans->used_mempool = false;
+ mempool_free(trans->mem, &c->btree_trans_mem_pool);
+ goto out_new_mem;
+ }
+
new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
if (unlikely(!new_mem)) {
bch2_trans_unlock(trans);
@@ -2798,6 +2964,8 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
new_bytes = BTREE_TRANS_MEM_MAX;
+ memcpy(new_mem, trans->mem, trans->mem_top);
+ trans->used_mempool = true;
kfree(trans->mem);
}
@@ -2811,7 +2979,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
if (ret)
return ERR_PTR(ret);
}
-
+out_new_mem:
trans->mem = new_mem;
trans->mem_bytes = new_bytes;
@@ -2819,7 +2987,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
}
-
+out_change_top:
p = trans->mem + trans->mem_top;
trans->mem_top += size;
memset(p, 0, size);
@@ -2913,7 +3081,8 @@ u32 bch2_trans_begin(struct btree_trans *trans)
if (!trans->restarted &&
(need_resched() ||
time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
- drop_locks_do(trans, (cond_resched(), 0));
+ bch2_trans_unlock(trans);
+ cond_resched();
now = local_clock();
}
trans->last_begin_time = now;
@@ -2923,11 +3092,14 @@ u32 bch2_trans_begin(struct btree_trans *trans)
bch2_trans_srcu_unlock(trans);
trans->last_begin_ip = _RET_IP_;
+ trans->locked = true;
+
if (trans->restarted) {
bch2_btree_path_traverse_all(trans);
trans->notrace_relock_fail = false;
}
+ bch2_trans_verify_not_unlocked(trans);
return trans->restart_count;
}
@@ -2980,7 +3152,7 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
*/
BUG_ON(pos_task &&
pid == pos_task->pid &&
- bch2_trans_locked(pos));
+ pos->locked);
if (pos_task && pid < pos_task->pid) {
list_add_tail(&trans->list, &pos->list);
@@ -2996,8 +3168,9 @@ got_trans:
trans->last_begin_time = local_clock();
trans->fn_idx = fn_idx;
trans->locking_wait.task = current;
+ trans->locked = true;
trans->journal_replay_not_finished =
- unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
+ unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
atomic_inc_not_zero(&c->journal_keys.ref);
trans->nr_paths = ARRAY_SIZE(trans->_paths);
trans->paths_allocated = trans->_paths_allocated;
@@ -3093,7 +3266,7 @@ void bch2_trans_put(struct btree_trans *trans)
if (paths_allocated != trans->_paths_allocated)
kvfree_rcu_mightsleep(paths_allocated);
- if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
+ if (trans->used_mempool)
mempool_free(trans->mem, &c->btree_trans_mem_pool);
else
kfree(trans->mem);
@@ -3126,13 +3299,11 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
pid = owner ? owner->pid : 0;
rcu_read_unlock();
- prt_tab(out);
- prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
+ prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
b->level, bch2_btree_id_str(b->btree_id));
bch2_bpos_to_text(out, btree_node_pos(b));
- prt_tab(out);
- prt_printf(out, " locks %u:%u:%u held by pid %u",
+ prt_printf(out, "\t locks %u:%u:%u held by pid %u",
c.n[0], c.n[1], c.n[2], pid);
}
@@ -3189,10 +3360,8 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
b = READ_ONCE(trans->locking);
if (b) {
- prt_printf(out, " blocked for %lluus on",
- div_u64(local_clock() - trans->locking_wait.start_time,
- 1000));
- prt_newline(out);
+ prt_printf(out, " blocked for %lluus on\n",
+ div_u64(local_clock() - trans->locking_wait.start_time, 1000));
prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
bch2_btree_bkey_cached_common_to_text(out, b);
prt_newline(out);
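
The __bch2_trans_kmalloc() hunks above add a used_mempool path: when the transaction's bump-allocation buffer came from the emergency mempool and must grow, a fresh buffer is kmalloc'd, the live contents copied over, and the mempool item returned; conversely, when krealloc fails, the allocator falls back to the mempool and copies into it. A reduced userspace sketch of that pattern follows; every name in it (bump_alloc, EMERGENCY_BYTES, ...) is invented for illustration, not bcachefs API.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#define EMERGENCY_BYTES 4096

static char emergency_buf[EMERGENCY_BYTES];
static bool emergency_in_use;

struct bump {
	char	*mem;
	size_t	bytes;		/* capacity */
	size_t	top;		/* bump pointer */
	bool	used_emergency;
};

static void *bump_alloc(struct bump *b, size_t size)
{
	if (b->top + size > b->bytes) {
		size_t new_bytes = b->top + size;
		char *new_mem;

		if (b->used_emergency) {
			/* Can't realloc the shared buffer: malloc and copy out */
			new_mem = malloc(new_bytes);
			if (!new_mem)
				return NULL;
			memcpy(new_mem, b->mem, b->top);
			emergency_in_use = false;		/* "mempool_free" */
			b->used_emergency = false;
		} else {
			new_mem = realloc(b->mem, new_bytes);
			if (!new_mem) {
				/* Last resort: the fixed emergency buffer */
				if (new_bytes > EMERGENCY_BYTES || emergency_in_use)
					return NULL;
				new_mem = emergency_buf;
				memcpy(new_mem, b->mem, b->top);
				free(b->mem);
				emergency_in_use = true;	/* "mempool_alloc" */
				b->used_emergency = true;
				new_bytes = EMERGENCY_BYTES;
			}
		}
		b->mem = new_mem;
		b->bytes = new_bytes;
	}

	void *p = b->mem + b->top;
	b->top += size;
	memset(p, 0, size);
	return p;
}

The kernel version additionally drops and retakes the transaction's locks around the blocking allocation, and restarts the transaction whenever the buffer moves, since callers may still hold pointers into the old allocation.
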
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 24772538e4cc..eab2a25bdc7a 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -216,9 +216,13 @@ int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
btree_path_idx_t,
unsigned, unsigned long);
+static inline void bch2_trans_verify_not_unlocked(struct btree_trans *);
+
static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
btree_path_idx_t path, unsigned flags)
{
+ bch2_trans_verify_not_unlocked(trans);
+
if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
return 0;
@@ -227,6 +231,9 @@ static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
unsigned, unsigned, unsigned, unsigned long);
+btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
+ unsigned, struct bpos);
+
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
/*
@@ -283,7 +290,6 @@ int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);
-bool bch2_trans_locked(struct btree_trans *);
static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
@@ -309,6 +315,14 @@ static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
bch2_trans_in_restart_error(trans);
}
+void __noreturn bch2_trans_unlocked_error(struct btree_trans *);
+
+static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans)
+{
+ if (!trans->locked)
+ bch2_trans_unlocked_error(trans);
+}
+
__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
@@ -386,10 +400,10 @@ static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos
if (unlikely(iter->update_path))
bch2_path_put(trans, iter->update_path,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_intent);
iter->update_path = 0;
- if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
+ if (!(iter->flags & BTREE_ITER_all_snapshots))
new_pos.snapshot = iter->snapshot;
__bch2_btree_iter_set_pos(iter, new_pos);
@@ -397,7 +411,7 @@ static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos
static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
- BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
+ BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
iter->pos = bkey_start_pos(&iter->k);
}
@@ -416,20 +430,20 @@ static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
unsigned btree_id,
unsigned flags)
{
- if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
+ if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
btree_id_is_extents(btree_id))
- flags |= BTREE_ITER_IS_EXTENTS;
+ flags |= BTREE_ITER_is_extents;
- if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
+ if (!(flags & BTREE_ITER_snapshot_field) &&
!btree_type_has_snapshot_field(btree_id))
- flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
+ flags &= ~BTREE_ITER_all_snapshots;
- if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+ if (!(flags & BTREE_ITER_all_snapshots) &&
btree_type_has_snapshots(btree_id))
- flags |= BTREE_ITER_FILTER_SNAPSHOTS;
+ flags |= BTREE_ITER_filter_snapshots;
if (trans->journal_replay_not_finished)
- flags |= BTREE_ITER_WITH_JOURNAL;
+ flags |= BTREE_ITER_with_journal;
return flags;
}
@@ -439,10 +453,10 @@ static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
unsigned flags)
{
if (!btree_id_cached(trans->c, btree_id)) {
- flags &= ~BTREE_ITER_CACHED;
- flags &= ~BTREE_ITER_WITH_KEY_CACHE;
- } else if (!(flags & BTREE_ITER_CACHED))
- flags |= BTREE_ITER_WITH_KEY_CACHE;
+ flags &= ~BTREE_ITER_cached;
+ flags &= ~BTREE_ITER_with_key_cache;
+ } else if (!(flags & BTREE_ITER_cached))
+ flags |= BTREE_ITER_with_key_cache;
return __bch2_btree_iter_flags(trans, btree_id, flags);
}
@@ -494,13 +508,7 @@ void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
-static inline void set_btree_iter_dontneed(struct btree_iter *iter)
-{
- struct btree_trans *trans = iter->trans;
-
- if (!trans->restarted)
- btree_iter_path(trans, iter)->preserve = false;
-}
+void bch2_set_btree_iter_dontneed(struct btree_iter *);
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
@@ -614,14 +622,14 @@ u32 bch2_trans_begin(struct btree_trans *);
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
bch2_btree_iter_peek_prev(iter);
}
static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
bch2_btree_iter_peek(iter);
}
@@ -629,7 +637,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
struct bpos end,
unsigned flags)
{
- if (!(flags & BTREE_ITER_SLOTS))
+ if (!(flags & BTREE_ITER_slots))
return bch2_btree_iter_peek_upto(iter, end);
if (bkey_gt(iter->pos, end))
@@ -642,7 +650,7 @@ int __bch2_btree_trans_too_many_iters(struct btree_trans *);
static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
- if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_INITIAL - 8)
+ if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
return __bch2_btree_trans_too_many_iters(trans);
return 0;
@@ -694,16 +702,12 @@ transaction_restart: \
_ret2 ?: trans_was_restarted(_trans, _restart_count); \
})
-#define for_each_btree_key_upto(_trans, _iter, _btree_id, \
- _start, _end, _flags, _k, _do) \
+#define for_each_btree_key_upto_continue(_trans, _iter, \
+ _end, _flags, _k, _do) \
({ \
- struct btree_iter _iter; \
struct bkey_s_c _k; \
int _ret3 = 0; \
\
- bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- \
do { \
_ret3 = lockrestart_do(_trans, ({ \
(_k) = bch2_btree_iter_peek_upto_type(&(_iter), \
@@ -719,6 +723,21 @@ transaction_restart: \
_ret3; \
})
+#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do) \
+ for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)
+
+#define for_each_btree_key_upto(_trans, _iter, _btree_id, \
+ _start, _end, _flags, _k, _do) \
+({ \
+ bch2_trans_begin(_trans); \
+ \
+ struct btree_iter _iter; \
+ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ \
+ for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\
+})
+
#define for_each_btree_key(_trans, _iter, _btree_id, \
_start, _flags, _k, _do) \
for_each_btree_key_upto(_trans, _iter, _btree_id, _start, \
@@ -789,14 +808,6 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
return k;
}
-#define for_each_btree_key_old(_trans, _iter, _btree_id, \
- _start, _flags, _k, _ret) \
- for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
- !((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
-
#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
@@ -856,6 +867,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
})
void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
+void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);
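
The for_each_btree_key_upto() rework above splits the macro into an initializing form and a _continue form so a half-finished walk can be resumed with the same iterator. A toy model of the split, using the same statement-expression idiom (a GNU extension, standard in kernel code); the names here are invented for illustration:

#include <stdio.h>

struct iter { int pos, end; };

#define for_each_num_continue(_iter, _do)				\
({									\
	int _ret = 0;							\
	while (!_ret && (_iter).pos < (_iter).end) {			\
		_ret = (_do);						\
		(_iter).pos++;						\
	}								\
	_ret;								\
})

#define for_each_num(_iter, _start, _end, _do)				\
({									\
	(_iter) = (struct iter) { .pos = (_start), .end = (_end) };	\
	for_each_num_continue(_iter, _do);				\
})

int main(void)
{
	struct iter it;
	int ret = for_each_num(it, 0, 4, (printf("%d\n", it.pos), 0));

	/* Resume from where the previous walk stopped: */
	it.end = 6;
	ret = ret ?: for_each_num_continue(it, (printf("%d\n", it.pos), 0));
	return ret;
}
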
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index 50e04356d72c..332dbf164929 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -130,12 +130,30 @@ struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree
return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
}
+static void journal_iter_verify(struct journal_iter *iter)
+{
+ struct journal_keys *keys = iter->keys;
+ size_t gap_size = keys->size - keys->nr;
+
+ BUG_ON(iter->idx >= keys->gap &&
+ iter->idx < keys->gap + gap_size);
+
+ if (iter->idx < keys->size) {
+ struct journal_key *k = keys->data + iter->idx;
+
+ int cmp = cmp_int(k->btree_id, iter->btree_id) ?:
+ cmp_int(k->level, iter->level);
+ BUG_ON(cmp < 0);
+ }
+}
+
static void journal_iters_fix(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;
/* The key we just inserted is immediately before the gap: */
size_t gap_end = keys->gap + (keys->size - keys->nr);
- struct btree_and_journal_iter *iter;
+ struct journal_key *new_key = &keys->data[keys->gap - 1];
+ struct journal_iter *iter;
/*
* If an iterator points one after the key we just inserted, decrement
@@ -143,9 +161,14 @@ static void journal_iters_fix(struct bch_fs *c)
* decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
* handle that:
*/
- list_for_each_entry(iter, &c->journal_iters, journal.list)
- if (iter->journal.idx == gap_end)
- iter->journal.idx = keys->gap - 1;
+ list_for_each_entry(iter, &c->journal_iters, list) {
+ journal_iter_verify(iter);
+ if (iter->idx == gap_end &&
+ new_key->btree_id == iter->btree_id &&
+ new_key->level == iter->level)
+ iter->idx = keys->gap - 1;
+ journal_iter_verify(iter);
+ }
}
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
@@ -192,7 +215,12 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
if (idx > keys->gap)
idx -= keys->size - keys->nr;
+ size_t old_gap = keys->gap;
+
if (keys->nr == keys->size) {
+ journal_iters_move_gap(c, old_gap, keys->size);
+ old_gap = keys->size;
+
struct journal_keys new_keys = {
.nr = keys->nr,
.size = max_t(size_t, keys->size, 8) * 2,
@@ -216,7 +244,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
keys->gap = keys->nr;
}
- journal_iters_move_gap(c, keys->gap, idx);
+ journal_iters_move_gap(c, old_gap, idx);
move_gap(keys, idx);
@@ -261,6 +289,22 @@ int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
return bch2_journal_key_insert(c, id, level, &whiteout);
}
+bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree,
+ unsigned level, struct bpos pos)
+{
+ struct journal_keys *keys = &trans->c->journal_keys;
+ size_t idx = bch2_journal_key_search(keys, btree, level, pos);
+
+ if (!trans->journal_replay_not_finished)
+ return false;
+
+ return (idx < keys->size &&
+ keys->data[idx].btree_id == btree &&
+ keys->data[idx].level == level &&
+ bpos_eq(keys->data[idx].k->k.p, pos) &&
+ bkey_deleted(&keys->data[idx].k->k));
+}
+
void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
unsigned level, struct bpos pos)
{
@@ -285,16 +329,21 @@ static void bch2_journal_iter_advance(struct journal_iter *iter)
static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
- struct journal_key *k = iter->keys->data + iter->idx;
+ journal_iter_verify(iter);
+
+ while (iter->idx < iter->keys->size) {
+ struct journal_key *k = iter->keys->data + iter->idx;
+
+ int cmp = cmp_int(k->btree_id, iter->btree_id) ?:
+ cmp_int(k->level, iter->level);
+ if (cmp > 0)
+ break;
+ BUG_ON(cmp);
- while (k < iter->keys->data + iter->keys->size &&
- k->btree_id == iter->btree_id &&
- k->level == iter->level) {
if (!k->overwritten)
return bkey_i_to_s_c(k->k);
bch2_journal_iter_advance(iter);
- k = iter->keys->data + iter->idx;
}
return bkey_s_c_null;
@@ -314,6 +363,8 @@ static void bch2_journal_iter_init(struct bch_fs *c,
iter->level = level;
iter->keys = &c->journal_keys;
iter->idx = bch2_journal_key_search(&c->journal_keys, id, level, pos);
+
+ journal_iter_verify(iter);
}
static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
@@ -363,7 +414,7 @@ static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
- struct bkey_s_c btree_k, journal_k, ret;
+ struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret;
if (iter->prefetch && iter->journal.level)
btree_and_journal_iter_prefetch(iter);
@@ -375,9 +426,10 @@ again:
bpos_lt(btree_k.k->p, iter->pos))
bch2_journal_iter_advance_btree(iter);
- while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
- bpos_lt(journal_k.k->p, iter->pos))
- bch2_journal_iter_advance(&iter->journal);
+ if (iter->trans->journal_replay_not_finished)
+ while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
+ bpos_lt(journal_k.k->p, iter->pos))
+ bch2_journal_iter_advance(&iter->journal);
ret = journal_k.k &&
(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
@@ -417,10 +469,15 @@ void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
iter->trans = trans;
iter->b = b;
iter->node_iter = node_iter;
- bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos);
- INIT_LIST_HEAD(&iter->journal.list);
iter->pos = b->data->min_key;
iter->at_end = false;
+ INIT_LIST_HEAD(&iter->journal.list);
+
+ if (trans->journal_replay_not_finished) {
+ bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos);
+ if (!test_bit(BCH_FS_may_go_rw, &trans->c->flags))
+ list_add(&iter->journal.list, &trans->c->journal_iters);
+ }
}
/*
@@ -435,7 +492,6 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
bch2_btree_node_iter_init_from_start(&node_iter, b);
__bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key);
- list_add(&iter->journal.list, &trans->c->journal_iters);
}
/* sort and dedup all keys in the journal: */
@@ -548,3 +604,39 @@ int bch2_journal_keys_sort(struct bch_fs *c)
bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr);
return 0;
}
+
+void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree,
+ unsigned level_min, unsigned level_max,
+ struct bpos start, struct bpos end)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ size_t dst = 0;
+
+ move_gap(keys, keys->nr);
+
+ darray_for_each(*keys, i)
+ if (!(i->btree_id == btree &&
+ i->level >= level_min &&
+ i->level <= level_max &&
+ bpos_ge(i->k->k.p, start) &&
+ bpos_le(i->k->k.p, end)))
+ keys->data[dst++] = *i;
+ keys->nr = keys->gap = dst;
+}
+
+void bch2_journal_keys_dump(struct bch_fs *c)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ struct printbuf buf = PRINTBUF;
+
+ pr_info("%zu keys:", keys->nr);
+
+ move_gap(keys, keys->nr);
+
+ darray_for_each(*keys, i) {
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
+ pr_err("%s l=%u %s", bch2_btree_id_str(i->btree_id), i->level, buf.buf);
+ }
+ printbuf_exit(&buf);
+}
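
The journal_keys structure these hunks operate on is a gap buffer: a sorted array with a movable hole at the insertion point, so repeated inserts near the same position are cheap. A minimal sketch of the structure, with invented field and function names (the kernel's move_gap() works on the real journal_key structs):

#include <stdlib.h>
#include <string.h>

struct gapbuf {
	int	*data;
	size_t	nr;	/* live elements */
	size_t	size;	/* allocated elements */
	size_t	gap;	/* physical index where the hole starts */
};

/* Logical index -> physical index, skipping the hole */
static size_t gb_idx(struct gapbuf *g, size_t i)
{
	return i < g->gap ? i : i + (g->size - g->nr);
}

static void gb_move_gap(struct gapbuf *g, size_t new_gap)
{
	size_t gap_size = g->size - g->nr;

	if (new_gap < g->gap)
		memmove(g->data + new_gap + gap_size, g->data + new_gap,
			(g->gap - new_gap) * sizeof(*g->data));
	else if (new_gap > g->gap)
		memmove(g->data + g->gap, g->data + g->gap + gap_size,
			(new_gap - g->gap) * sizeof(*g->data));
	g->gap = new_gap;
}

static void gb_insert(struct gapbuf *g, size_t pos, int v)
{
	/* Caller ensures nr < size; a real version would grow here */
	gb_move_gap(g, pos);
	g->data[g->gap++] = v;
	g->nr++;
}

When the gap moves or an insert lands next to it, any live iterator holding a physical index must be adjusted, which is what journal_iters_fix() and journal_iters_move_gap() do above; the new journal_iter_verify() asserts that an iterator never points into the hole.
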
diff --git a/fs/bcachefs/btree_journal_iter.h b/fs/bcachefs/btree_journal_iter.h
index c9d19da3ea04..1ba4a79b0ef9 100644
--- a/fs/bcachefs/btree_journal_iter.h
+++ b/fs/bcachefs/btree_journal_iter.h
@@ -40,8 +40,8 @@ int bch2_journal_key_insert(struct bch_fs *, enum btree_id,
unsigned, struct bkey_i *);
int bch2_journal_key_delete(struct bch_fs *, enum btree_id,
unsigned, struct bpos);
-void bch2_journal_key_overwritten(struct bch_fs *, enum btree_id,
- unsigned, struct bpos);
+bool bch2_key_deleted_in_journal(struct btree_trans *, enum btree_id, unsigned, struct bpos);
+void bch2_journal_key_overwritten(struct bch_fs *, enum btree_id, unsigned, struct bpos);
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *);
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *);
@@ -66,4 +66,10 @@ void bch2_journal_entries_free(struct bch_fs *);
int bch2_journal_keys_sort(struct bch_fs *);
+void bch2_shoot_down_journal_keys(struct bch_fs *, enum btree_id,
+ unsigned, unsigned,
+ struct bpos, struct bpos);
+
+void bch2_journal_keys_dump(struct bch_fs *);
+
#endif /* _BCACHEFS_BTREE_JOURNAL_ITER_H */
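
The newly exported bch2_key_deleted_in_journal() has a simple shape: binary-search the sorted journal keys for the first entry at or after the search position, then report "deleted" only on an exact match that is a whiteout. Reduced to integers below; the kernel version compares (btree, level, pos) tuples and checks bkey_deleted():

#include <stdbool.h>
#include <stddef.h>

struct jkey { int pos; bool whiteout; };

/* First index whose key is >= pos (classic lower bound) */
static size_t lower_bound(const struct jkey *k, size_t n, int pos)
{
	size_t l = 0, r = n;

	while (l < r) {
		size_t m = l + (r - l) / 2;

		if (k[m].pos < pos)
			l = m + 1;
		else
			r = m;
	}
	return l;
}

static bool key_deleted(const struct jkey *k, size_t n, int pos)
{
	size_t i = lower_bound(k, n, pos);

	return i < n && k[i].pos == pos && k[i].whiteout;
}
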
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 581edcb0911b..75f5e6fe4634 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -169,6 +169,7 @@ static void bkey_cached_move_to_freelist(struct btree_key_cache *bc,
} else {
mutex_lock(&bc->lock);
list_move_tail(&ck->list, &bc->freed_pcpu);
+ bc->nr_freed_pcpu++;
mutex_unlock(&bc->lock);
}
}
@@ -245,6 +246,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
if (!list_empty(&bc->freed_pcpu)) {
ck = list_last_entry(&bc->freed_pcpu, struct bkey_cached, list);
list_del_init(&ck->list);
+ bc->nr_freed_pcpu--;
}
mutex_unlock(&bc->lock);
}
@@ -381,9 +383,9 @@ static int btree_key_cache_fill(struct btree_trans *trans,
int ret;
bch2_trans_iter_init(trans, &iter, ck->key.btree_id, ck->key.pos,
- BTREE_ITER_KEY_CACHE_FILL|
- BTREE_ITER_CACHED_NOFILL);
- iter.flags &= ~BTREE_ITER_WITH_JOURNAL;
+ BTREE_ITER_key_cache_fill|
+ BTREE_ITER_cached_nofill);
+ iter.flags &= ~BTREE_ITER_with_journal;
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
@@ -454,7 +456,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
bch2_btree_node_unlock_write(trans, ck_path, ck_path->l[0].b);
/* We're not likely to need this iterator again: */
- set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(&iter);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -513,23 +515,10 @@ retry:
fill:
path->uptodate = BTREE_ITER_UPTODATE;
- if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
- /*
- * Using the underscore version because we haven't set
- * path->uptodate yet:
- */
- if (!path->locks_want &&
- !__bch2_btree_path_upgrade(trans, path, 1, NULL)) {
- trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
- ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
- goto err;
- }
-
- ret = btree_key_cache_fill(trans, path, ck);
- if (ret)
- goto err;
-
- ret = bch2_btree_path_relock(trans, path, _THIS_IP_);
+ if (!ck->valid && !(flags & BTREE_ITER_cached_nofill)) {
+ ret = bch2_btree_path_upgrade(trans, path, 1) ?:
+ btree_key_cache_fill(trans, path, ck) ?:
+ bch2_btree_path_relock(trans, path, _THIS_IP_);
if (ret)
goto err;
@@ -620,13 +609,13 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
int ret;
bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
- BTREE_ITER_SLOTS|
- BTREE_ITER_INTENT|
- BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_slots|
+ BTREE_ITER_intent|
+ BTREE_ITER_all_snapshots);
bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
- BTREE_ITER_CACHED|
- BTREE_ITER_INTENT);
- b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;
+ BTREE_ITER_cached|
+ BTREE_ITER_intent);
+ b_iter.flags &= ~BTREE_ITER_with_key_cache;
ret = bch2_btree_iter_traverse(&c_iter);
if (ret)
@@ -659,14 +648,14 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
commit_flags |= BCH_WATERMARK_reclaim;
if (ck->journal.seq != journal_last_seq(j) ||
- j->watermark == BCH_WATERMARK_stripe)
+ !test_bit(JOURNAL_space_low, &c->journal.flags))
commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
ret = bch2_btree_iter_traverse(&b_iter) ?:
bch2_trans_update(trans, &b_iter, ck->k,
- BTREE_UPDATE_KEY_CACHE_RECLAIM|
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
- BTREE_TRIGGER_NORUN) ?:
+ BTREE_UPDATE_key_cache_reclaim|
+ BTREE_UPDATE_internal_snapshot_node|
+ BTREE_TRIGGER_norun) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_check_rw|
BCH_TRANS_COMMIT_no_enospc|
@@ -788,7 +777,7 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
* flushing. The flush callback will not proceed unless ->seq matches
* the latest pin, so make sure it starts with a consistent value.
*/
- if (!(insert_entry->flags & BTREE_UPDATE_NOJOURNAL) ||
+ if (!(insert_entry->flags & BTREE_UPDATE_nojournal) ||
!journal_pin_active(&ck->journal)) {
ck->seq = trans->journal_res.seq;
}
@@ -833,6 +822,8 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
int srcu_idx;
mutex_lock(&bc->lock);
+ bc->requested_to_free += sc->nr_to_scan;
+
srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
flags = memalloc_nofs_save();
@@ -840,8 +831,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
* Newest freed entries are at the end of the list - once we hit one
* that's too new to be freed, we can bail out:
*/
- scanned += bc->nr_freed_nonpcpu;
-
list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
ck->btree_trans_barrier_seq))
@@ -853,13 +842,9 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
atomic_long_dec(&bc->nr_freed);
freed++;
bc->nr_freed_nonpcpu--;
+ bc->freed++;
}
- if (scanned >= nr)
- goto out;
-
- scanned += bc->nr_freed_pcpu;
-
list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
ck->btree_trans_barrier_seq))
@@ -871,11 +856,9 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
atomic_long_dec(&bc->nr_freed);
freed++;
bc->nr_freed_pcpu--;
+ bc->freed++;
}
- if (scanned >= nr)
- goto out;
-
rcu_read_lock();
tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
if (bc->shrink_iter >= tbl->size)
@@ -891,14 +874,19 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
ck = container_of(pos, struct bkey_cached, hash);
- if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ bc->skipped_dirty++;
goto next;
-
- if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+ } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
- else if (bkey_cached_lock_for_evict(ck)) {
+ bc->skipped_accessed++;
+ goto next;
+ } else if (bkey_cached_lock_for_evict(ck)) {
bkey_cached_evict(bc, ck);
bkey_cached_free(bc, ck);
+ bc->moved_to_freelist++;
+ } else {
+ bc->skipped_lock_fail++;
}
scanned++;
@@ -914,7 +902,6 @@ next:
} while (scanned < nr && bc->shrink_iter != start);
rcu_read_unlock();
-out:
memalloc_nofs_restore(flags);
srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
mutex_unlock(&bc->lock);
@@ -965,13 +952,15 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
}
#ifdef __KERNEL__
- for_each_possible_cpu(cpu) {
- struct btree_key_cache_freelist *f =
- per_cpu_ptr(bc->pcpu_freed, cpu);
-
- for (i = 0; i < f->nr; i++) {
- ck = f->objs[i];
- list_add(&ck->list, &items);
+ if (bc->pcpu_freed) {
+ for_each_possible_cpu(cpu) {
+ struct btree_key_cache_freelist *f =
+ per_cpu_ptr(bc->pcpu_freed, cpu);
+
+ for (i = 0; i < f->nr; i++) {
+ ck = f->objs[i];
+ list_add(&ck->list, &items);
+ }
}
}
#endif
@@ -1044,14 +1033,47 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
return 0;
}
-void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
+void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *bc)
{
- prt_printf(out, "nr_freed:\t%lu", atomic_long_read(&c->nr_freed));
- prt_newline(out);
- prt_printf(out, "nr_keys:\t%lu", atomic_long_read(&c->nr_keys));
- prt_newline(out);
- prt_printf(out, "nr_dirty:\t%lu", atomic_long_read(&c->nr_dirty));
- prt_newline(out);
+ struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
+
+ printbuf_tabstop_push(out, 24);
+ printbuf_tabstop_push(out, 12);
+
+ unsigned flags = memalloc_nofs_save();
+ mutex_lock(&bc->lock);
+ prt_printf(out, "keys:\t%lu\r\n", atomic_long_read(&bc->nr_keys));
+ prt_printf(out, "dirty:\t%lu\r\n", atomic_long_read(&bc->nr_dirty));
+ prt_printf(out, "freelist:\t%lu\r\n", atomic_long_read(&bc->nr_freed));
+ prt_printf(out, "nonpcpu freelist:\t%zu\r\n", bc->nr_freed_nonpcpu);
+ prt_printf(out, "pcpu freelist:\t%zu\r\n", bc->nr_freed_pcpu);
+
+ prt_printf(out, "\nshrinker:\n");
+ prt_printf(out, "requested_to_free:\t%lu\r\n", bc->requested_to_free);
+ prt_printf(out, "freed:\t%lu\r\n", bc->freed);
+ prt_printf(out, "moved_to_freelist:\t%lu\r\n", bc->moved_to_freelist);
+ prt_printf(out, "skipped_dirty:\t%lu\r\n", bc->skipped_dirty);
+ prt_printf(out, "skipped_accessed:\t%lu\r\n", bc->skipped_accessed);
+ prt_printf(out, "skipped_lock_fail:\t%lu\r\n", bc->skipped_lock_fail);
+
+ prt_printf(out, "srcu seq:\t%lu\r\n", get_state_synchronize_srcu(&c->btree_trans_barrier));
+
+ struct bkey_cached *ck;
+ unsigned iter = 0;
+ list_for_each_entry(ck, &bc->freed_nonpcpu, list) {
+ prt_printf(out, "freed_nonpcpu:\t%lu\r\n", ck->btree_trans_barrier_seq);
+ if (++iter > 10)
+ break;
+ }
+
+ iter = 0;
+ list_for_each_entry(ck, &bc->freed_pcpu, list) {
+ prt_printf(out, "freed_pcpu:\t%lu\r\n", ck->btree_trans_barrier_seq);
+ if (++iter > 10)
+ break;
+ }
+ mutex_unlock(&bc->lock);
+ memalloc_flags_restore(flags);
}
void bch2_btree_key_cache_exit(void)
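
The shrinker changes above keep a cursor (bc->shrink_iter) into the hash table and run a second-chance scan: dirty entries are skipped, recently-accessed entries lose their accessed bit and survive one pass, and everything else is evicted, with counters recording each outcome. A reduced model of that clock scan; the struct and function names are invented, not the kernel's:

#include <stdbool.h>
#include <stddef.h>

struct entry { bool dirty, accessed; };

struct stats {
	unsigned long freed, skipped_dirty, skipped_accessed;
};

static unsigned long clock_scan(struct entry *tbl, size_t n, size_t *hand,
				size_t nr_to_scan, struct stats *st)
{
	size_t scanned = 0, start = *hand;

	do {
		struct entry *e = &tbl[*hand];

		if (e->dirty) {
			st->skipped_dirty++;
		} else if (e->accessed) {
			e->accessed = false;	/* second chance */
			st->skipped_accessed++;
		} else {
			/* Evict: the real code moves the entry to a
			 * freelist behind an SRCU grace period */
			st->freed++;
		}

		scanned++;
		*hand = (*hand + 1) % n;
	} while (scanned < nr_to_scan && *hand != start);

	return st->freed;
}
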
diff --git a/fs/bcachefs/btree_key_cache_types.h b/fs/bcachefs/btree_key_cache_types.h
index 290e4e57df5b..237e8bb3ac40 100644
--- a/fs/bcachefs/btree_key_cache_types.h
+++ b/fs/bcachefs/btree_key_cache_types.h
@@ -24,6 +24,14 @@ struct btree_key_cache {
atomic_long_t nr_freed;
atomic_long_t nr_keys;
atomic_long_t nr_dirty;
+
+ /* shrinker stats */
+ unsigned long requested_to_free;
+ unsigned long freed;
+ unsigned long moved_to_freelist;
+ unsigned long skipped_dirty;
+ unsigned long skipped_accessed;
+ unsigned long skipped_lock_fail;
};
struct bkey_cached_key {
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index b9b151e693ed..c3e9b0cc7bbd 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -83,8 +83,7 @@ static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
struct trans_waiting_for_lock *i;
- prt_printf(out, "Found lock cycle (%u entries):", g->nr);
- prt_newline(out);
+ prt_printf(out, "Found lock cycle (%u entries):\n", g->nr);
for (i = g->g; i < g->g + g->nr; i++) {
struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
@@ -224,8 +223,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
bch2_btree_trans_to_text(&buf, trans);
- prt_printf(&buf, "backtrace:");
- prt_newline(&buf);
+ prt_printf(&buf, "backtrace:\n");
printbuf_indent_add(&buf, 2);
bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
printbuf_indent_sub(&buf, 2);
@@ -440,33 +438,7 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
struct btree_path *path,
struct btree_bkey_cached_common *b)
{
- struct btree_path *linked;
- unsigned i, iter;
- int ret;
-
- /*
- * XXX BIG FAT NOTICE
- *
- * Drop all read locks before taking a write lock:
- *
- * This is a hack, because bch2_btree_node_lock_write_nofail() is a
- * hack - but by dropping read locks first, this should never fail, and
- * we only use this in code paths where whatever read locks we've
- * already taken are no longer needed:
- */
-
- trans_for_each_path(trans, linked, iter) {
- if (!linked->nodes_locked)
- continue;
-
- for (i = 0; i < BTREE_MAX_DEPTH; i++)
- if (btree_node_read_locked(linked, i)) {
- btree_node_unlock(trans, linked, i);
- btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
- }
- }
-
- ret = __btree_node_lock_write(trans, path, b, true);
+ int ret = __btree_node_lock_write(trans, path, b, true);
BUG_ON(ret);
}
@@ -518,8 +490,6 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
if (path->uptodate == BTREE_ITER_NEED_RELOCK)
path->uptodate = BTREE_ITER_UPTODATE;
- bch2_trans_verify_locks(trans);
-
return path->uptodate < BTREE_ITER_NEED_RELOCK;
}
@@ -635,7 +605,9 @@ bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_pa
{
struct get_locks_fail f;
- return btree_path_get_locks(trans, path, false, &f);
+ bool ret = btree_path_get_locks(trans, path, false, &f);
+ bch2_trans_verify_locks(trans);
+ return ret;
}
int __bch2_btree_path_relock(struct btree_trans *trans,
@@ -658,7 +630,9 @@ bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
path->locks_want = new_locks_want;
- return btree_path_get_locks(trans, path, true, f);
+ bool ret = btree_path_get_locks(trans, path, true, f);
+ bch2_trans_verify_locks(trans);
+ return ret;
}
bool __bch2_btree_path_upgrade(struct btree_trans *trans,
@@ -666,8 +640,9 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
unsigned new_locks_want,
struct get_locks_fail *f)
{
- if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
- return true;
+ bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f);
+ if (ret)
+ goto out;
/*
* XXX: this is ugly - we'd prefer to not be mucking with other
@@ -701,8 +676,9 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
btree_path_get_locks(trans, linked, true, NULL);
}
}
-
- return false;
+out:
+ bch2_trans_verify_locks(trans);
+ return ret;
}
void __bch2_btree_path_downgrade(struct btree_trans *trans,
@@ -751,82 +727,100 @@ void bch2_trans_downgrade(struct btree_trans *trans)
bch2_btree_path_downgrade(trans, path);
}
-int bch2_trans_relock(struct btree_trans *trans)
+static inline void __bch2_trans_unlock(struct btree_trans *trans)
{
struct btree_path *path;
unsigned i;
- if (unlikely(trans->restarted))
- return -((int) trans->restarted);
+ trans_for_each_path(trans, path, i)
+ __bch2_btree_path_unlock(trans, path);
+}
- trans_for_each_path(trans, path, i) {
- struct get_locks_fail f;
+static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
+ struct get_locks_fail *f, bool trace)
+{
+ if (!trace)
+ goto out;
- if (path->should_be_locked &&
- !btree_path_get_locks(trans, path, false, &f)) {
- if (trace_trans_restart_relock_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bpos_to_text(&buf, path->pos);
- prt_printf(&buf, " l=%u seq=%u node seq=",
- f.l, path->l[f.l].lock_seq);
- if (IS_ERR_OR_NULL(f.b)) {
- prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
- } else {
- prt_printf(&buf, "%u", f.b->c.lock.seq);
-
- struct six_lock_count c =
- bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
- prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
-
- c = six_lock_counts(&f.b->c.lock);
- prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
- }
+ if (trace_trans_restart_relock_enabled()) {
+ struct printbuf buf = PRINTBUF;
- trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
- printbuf_exit(&buf);
- }
+ bch2_bpos_to_text(&buf, path->pos);
+ prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
+ if (IS_ERR_OR_NULL(f->b)) {
+ prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
+ } else {
+ prt_printf(&buf, "%u", f->b->c.lock.seq);
+
+ struct six_lock_count c =
+ bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
+ prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
- count_event(trans->c, trans_restart_relock);
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+ c = six_lock_counts(&f->b->c.lock);
+ prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
}
+
+ trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
+ printbuf_exit(&buf);
}
- return 0;
+ count_event(trans->c, trans_restart_relock);
+out:
+ __bch2_trans_unlock(trans);
+ bch2_trans_verify_locks(trans);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}
-int bch2_trans_relock_notrace(struct btree_trans *trans)
+static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
{
- struct btree_path *path;
- unsigned i;
+ bch2_trans_verify_locks(trans);
if (unlikely(trans->restarted))
return -((int) trans->restarted);
+ if (unlikely(trans->locked))
+ goto out;
+
+ struct btree_path *path;
+ unsigned i;
+
+ trans_for_each_path(trans, path, i) {
+ struct get_locks_fail f;
- trans_for_each_path(trans, path, i)
if (path->should_be_locked &&
- !bch2_btree_path_relock_norestart(trans, path)) {
- return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
- }
+ !btree_path_get_locks(trans, path, false, &f))
+ return bch2_trans_relock_fail(trans, path, &f, trace);
+ }
+
+ trans->locked = true;
+out:
+ bch2_trans_verify_locks(trans);
return 0;
}
+int bch2_trans_relock(struct btree_trans *trans)
+{
+ return __bch2_trans_relock(trans, true);
+}
+
+int bch2_trans_relock_notrace(struct btree_trans *trans)
+{
+ return __bch2_trans_relock(trans, false);
+}
+
void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
- struct btree_path *path;
- unsigned i;
+ __bch2_trans_unlock(trans);
- trans_for_each_path(trans, path, i)
- __bch2_btree_path_unlock(trans, path);
+ trans->locked = false;
+ trans->last_unlock_ip = _RET_IP_;
}
void bch2_trans_unlock(struct btree_trans *trans)
{
- struct btree_path *path;
- unsigned i;
+ __bch2_trans_unlock(trans);
- trans_for_each_path(trans, path, i)
- __bch2_btree_path_unlock(trans, path);
+ trans->locked = false;
+ trans->last_unlock_ip = _RET_IP_;
}
void bch2_trans_unlock_long(struct btree_trans *trans)
@@ -835,17 +829,6 @@ void bch2_trans_unlock_long(struct btree_trans *trans)
bch2_trans_srcu_unlock(trans);
}
-bool bch2_trans_locked(struct btree_trans *trans)
-{
- struct btree_path *path;
- unsigned i;
-
- trans_for_each_path(trans, path, i)
- if (path->nodes_locked)
- return true;
- return false;
-}
-
int __bch2_trans_mutex_lock(struct btree_trans *trans,
struct mutex *lock)
{
@@ -862,15 +845,19 @@ int __bch2_trans_mutex_lock(struct btree_trans *trans,
void bch2_btree_path_verify_locks(struct btree_path *path)
{
- unsigned l;
+ /*
+ * A path may be uptodate and yet have nothing locked if and only if
+ * there is no node at path->level, which generally means we were
+ * iterating over all nodes and got to the end of the btree
+ */
+ BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
+ btree_path_node(path, path->level) &&
+ !path->nodes_locked);
- if (!path->nodes_locked) {
- BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
- btree_path_node(path, path->level));
+ if (!path->nodes_locked)
return;
- }
- for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+ for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
int want = btree_lock_want(path, l);
int have = btree_node_locked_type(path, l);
@@ -883,8 +870,24 @@ void bch2_btree_path_verify_locks(struct btree_path *path)
}
}
+static bool bch2_trans_locked(struct btree_trans *trans)
+{
+ struct btree_path *path;
+ unsigned i;
+
+ trans_for_each_path(trans, path, i)
+ if (path->nodes_locked)
+ return true;
+ return false;
+}
+
void bch2_trans_verify_locks(struct btree_trans *trans)
{
+ if (!trans->locked) {
+ BUG_ON(bch2_trans_locked(trans));
+ return;
+ }
+
struct btree_path *path;
unsigned i;
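
The theme of these btree_locking.c hunks is replacing the old bch2_trans_locked() (a walk over every path) with a trans->locked flag maintained at each lock/unlock transition, backed by assertions that the flag and the per-path state agree. A reduced model of the pattern, with invented names:

#include <assert.h>
#include <stdbool.h>

struct trans {
	bool locked;
	int  nodes_locked;	/* stand-in for per-path lock counts */
};

static void trans_verify_locks(struct trans *t)
{
	/* If we claim to be unlocked, nothing may actually be locked */
	if (!t->locked)
		assert(!t->nodes_locked);
}

static void trans_unlock(struct trans *t)
{
	t->nodes_locked = 0;	/* __bch2_trans_unlock() */
	t->locked = false;
}

static int trans_relock(struct trans *t)
{
	trans_verify_locks(t);
	if (t->locked)		/* already locked: nothing to do */
		return 0;

	t->nodes_locked = 1;	/* re-take locks; may fail and restart */
	t->locked = true;
	trans_verify_locks(t);
	return 0;
}

The payoff is that entry points can cheaply assert the transaction is not unlocked (the new bch2_trans_verify_not_unlocked()) instead of scanning every path, and relock failures funnel through one cold path that unlocks everything before restarting.
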
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 4bd72c855da1..7f41545b9147 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -364,14 +364,14 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
{
- struct get_locks_fail f;
+ struct get_locks_fail f = {};
unsigned old_locks_want = path->locks_want;
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
if (path->locks_want < new_locks_want
? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
- : path->uptodate == BTREE_ITER_UPTODATE)
+ : path->nodes_locked)
return 0;
trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
new file mode 100644
index 000000000000..45cb8149d374
--- /dev/null
+++ b/fs/bcachefs/btree_node_scan.c
@@ -0,0 +1,524 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "btree_cache.h"
+#include "btree_io.h"
+#include "btree_journal_iter.h"
+#include "btree_node_scan.h"
+#include "btree_update_interior.h"
+#include "buckets.h"
+#include "error.h"
+#include "journal_io.h"
+#include "recovery_passes.h"
+
+#include <linux/kthread.h>
+#include <linux/sort.h>
+
+struct find_btree_nodes_worker {
+ struct closure *cl;
+ struct find_btree_nodes *f;
+ struct bch_dev *ca;
+};
+
+static void found_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct found_btree_node *n)
+{
+ prt_printf(out, "%s l=%u seq=%u cookie=%llx ", bch2_btree_id_str(n->btree_id), n->level, n->seq, n->cookie);
+ bch2_bpos_to_text(out, n->min_key);
+ prt_str(out, "-");
+ bch2_bpos_to_text(out, n->max_key);
+
+ if (n->range_updated)
+ prt_str(out, " range updated");
+ if (n->overwritten)
+ prt_str(out, " overwritten");
+
+ for (unsigned i = 0; i < n->nr_ptrs; i++) {
+ prt_char(out, ' ');
+ bch2_extent_ptr_to_text(out, c, n->ptrs + i);
+ }
+}
+
+static void found_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c, found_btree_nodes nodes)
+{
+ printbuf_indent_add(out, 2);
+ darray_for_each(nodes, i) {
+ found_btree_node_to_text(out, c, i);
+ prt_newline(out);
+ }
+ printbuf_indent_sub(out, 2);
+}
+
+static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_node *f)
+{
+ struct bkey_i_btree_ptr_v2 *bp = bkey_btree_ptr_v2_init(k);
+
+ set_bkey_val_u64s(&bp->k, sizeof(struct bch_btree_ptr_v2) / sizeof(u64) + f->nr_ptrs);
+ bp->k.p = f->max_key;
+ bp->v.seq = cpu_to_le64(f->cookie);
+ bp->v.sectors_written = 0;
+ bp->v.flags = 0;
+ bp->v.sectors_written = cpu_to_le16(f->sectors_written);
+ bp->v.min_key = f->min_key;
+ SET_BTREE_PTR_RANGE_UPDATED(&bp->v, f->range_updated);
+ memcpy(bp->v.start, f->ptrs, sizeof(struct bch_extent_ptr) * f->nr_ptrs);
+}
+
+static bool found_btree_node_is_readable(struct btree_trans *trans,
+ struct found_btree_node *f)
+{
+ struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } k;
+
+ found_btree_node_to_key(&k.k, f);
+
+ struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false);
+ bool ret = !IS_ERR_OR_NULL(b);
+ if (ret) {
+ f->sectors_written = b->written;
+ six_unlock_read(&b->c.lock);
+ }
+
+ /*
+ * We might update this node's range; if that happens, we need the node
+ * to be re-read so the read path can trim keys that are no longer in
+ * this node
+ */
+ if (ret && b != btree_node_root(trans->c, b))
+ bch2_btree_node_evict(trans, &k.k);
+ return ret;
+}
+
+static int found_btree_node_cmp_cookie(const void *_l, const void *_r)
+{
+ const struct found_btree_node *l = _l;
+ const struct found_btree_node *r = _r;
+
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ cmp_int(l->level, r->level) ?:
+ cmp_int(l->cookie, r->cookie);
+}
+
+/*
+ * Given two found btree nodes, if their sequence numbers are equal, take the
+ * one that's readable:
+ */
+static int found_btree_node_cmp_time(const struct found_btree_node *l,
+ const struct found_btree_node *r)
+{
+ return cmp_int(l->seq, r->seq);
+}
+
+static int found_btree_node_cmp_pos(const void *_l, const void *_r)
+{
+ const struct found_btree_node *l = _l;
+ const struct found_btree_node *r = _r;
+
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ -cmp_int(l->level, r->level) ?:
+ bpos_cmp(l->min_key, r->min_key) ?:
+ -found_btree_node_cmp_time(l, r);
+}
+
+static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
+ struct bio *bio, struct btree_node *bn, u64 offset)
+{
+ struct bch_fs *c = container_of(f, struct bch_fs, found_btree_nodes);
+
+ bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ);
+ bio->bi_iter.bi_sector = offset;
+ bch2_bio_map(bio, bn, PAGE_SIZE);
+
+ submit_bio_wait(bio);
+ if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
+ "IO error in try_read_btree_node() at %llu: %s",
+ offset, bch2_blk_status_to_str(bio->bi_status)))
+ return;
+
+ if (le64_to_cpu(bn->magic) != bset_magic(c))
+ return;
+
+ if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) {
+ struct nonce nonce = btree_nonce(&bn->keys, 0);
+ unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
+
+ bch2_encrypt(c, BSET_CSUM_TYPE(&bn->keys), nonce, &bn->flags, bytes);
+ }
+
+ if (btree_id_is_alloc(BTREE_NODE_ID(bn)))
+ return;
+
+ if (BTREE_NODE_LEVEL(bn) >= BTREE_MAX_DEPTH)
+ return;
+
+ rcu_read_lock();
+ struct found_btree_node n = {
+ .btree_id = BTREE_NODE_ID(bn),
+ .level = BTREE_NODE_LEVEL(bn),
+ .seq = BTREE_NODE_SEQ(bn),
+ .cookie = le64_to_cpu(bn->keys.seq),
+ .min_key = bn->min_key,
+ .max_key = bn->max_key,
+ .nr_ptrs = 1,
+ .ptrs[0].type = 1 << BCH_EXTENT_ENTRY_ptr,
+ .ptrs[0].offset = offset,
+ .ptrs[0].dev = ca->dev_idx,
+ .ptrs[0].gen = *bucket_gen(ca, sector_to_bucket(ca, offset)),
+ };
+ rcu_read_unlock();
+
+ if (bch2_trans_run(c, found_btree_node_is_readable(trans, &n))) {
+ mutex_lock(&f->lock);
+ if (BSET_BIG_ENDIAN(&bn->keys) != CPU_BIG_ENDIAN) {
+ bch_err(c, "try_read_btree_node() can't handle endian conversion");
+ f->ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (darray_push(&f->nodes, n))
+ f->ret = -ENOMEM;
+unlock:
+ mutex_unlock(&f->lock);
+ }
+}
+
+static int read_btree_nodes_worker(void *p)
+{
+ struct find_btree_nodes_worker *w = p;
+ struct bch_fs *c = container_of(w->f, struct bch_fs, found_btree_nodes);
+ struct bch_dev *ca = w->ca;
+ void *buf = (void *) __get_free_page(GFP_KERNEL);
+ struct bio *bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
+ unsigned long last_print = jiffies;
+
+ if (!buf || !bio) {
+ bch_err(c, "read_btree_nodes_worker: error allocating bio/buf");
+ w->f->ret = -ENOMEM;
+ goto err;
+ }
+
+ for (u64 bucket = ca->mi.first_bucket; bucket < ca->mi.nbuckets; bucket++)
+ for (unsigned bucket_offset = 0;
+ bucket_offset + btree_sectors(c) <= ca->mi.bucket_size;
+ bucket_offset += btree_sectors(c)) {
+ if (time_after(jiffies, last_print + HZ * 30)) {
+ u64 cur_sector = bucket * ca->mi.bucket_size + bucket_offset;
+ u64 end_sector = ca->mi.nbuckets * ca->mi.bucket_size;
+
+ bch_info(ca, "%s: %2u%% done", __func__,
+ (unsigned) div64_u64(cur_sector * 100, end_sector));
+ last_print = jiffies;
+ }
+
+ u64 sector = bucket * ca->mi.bucket_size + bucket_offset;
+
+ if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_mi_btree_bitmap &&
+ !bch2_dev_btree_bitmap_marked_sectors(ca, sector, btree_sectors(c)))
+ continue;
+
+ try_read_btree_node(w->f, ca, bio, buf, sector);
+ }
+err:
+ bio_put(bio);
+ free_page((unsigned long) buf);
+ percpu_ref_put(&ca->io_ref);
+ closure_put(w->cl);
+ kfree(w);
+ return 0;
+}
+
+static int read_btree_nodes(struct find_btree_nodes *f)
+{
+ struct bch_fs *c = container_of(f, struct bch_fs, found_btree_nodes);
+ struct closure cl;
+ int ret = 0;
+
+ closure_init_stack(&cl);
+
+ for_each_online_member(c, ca) {
+ if (!(ca->mi.data_allowed & BIT(BCH_DATA_btree)))
+ continue;
+
+ struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL);
+ struct task_struct *t;
+
+ if (!w) {
+ percpu_ref_put(&ca->io_ref);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ percpu_ref_get(&ca->io_ref);
+ closure_get(&cl);
+ w->cl = &cl;
+ w->f = f;
+ w->ca = ca;
+
+ t = kthread_run(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
+ ret = PTR_ERR_OR_ZERO(t);
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
+ closure_put(&cl);
+ f->ret = ret;
+ bch_err(c, "error starting kthread: %i", ret);
+ break;
+ }
+ }
+err:
+ closure_sync(&cl);
+ return f->ret ?: ret;
+}
+
+static void bubble_up(struct found_btree_node *n, struct found_btree_node *end)
+{
+ while (n + 1 < end &&
+ found_btree_node_cmp_pos(n, n + 1) > 0) {
+ swap(n[0], n[1]);
+ n++;
+ }
+}
+
+static int handle_overwrites(struct bch_fs *c,
+ struct found_btree_node *start,
+ struct found_btree_node *end)
+{
+ struct found_btree_node *n;
+again:
+ for (n = start + 1;
+ n < end &&
+ n->btree_id == start->btree_id &&
+ n->level == start->level &&
+ bpos_lt(n->min_key, start->max_key);
+ n++) {
+ int cmp = found_btree_node_cmp_time(start, n);
+
+ if (cmp > 0) {
+ if (bpos_cmp(start->max_key, n->max_key) >= 0)
+ n->overwritten = true;
+ else {
+ n->range_updated = true;
+ n->min_key = bpos_successor(start->max_key);
+ bubble_up(n, end);
+ goto again;
+ }
+ } else if (cmp < 0) {
+ BUG_ON(bpos_cmp(n->min_key, start->min_key) <= 0);
+
+ start->max_key = bpos_predecessor(n->min_key);
+ start->range_updated = true;
+ } else if (n->level) {
+ n->overwritten = true;
+ } else {
+ struct printbuf buf = PRINTBUF;
+
+ prt_str(&buf, "overlapping btree nodes with same seq! halting\n ");
+ found_btree_node_to_text(&buf, c, start);
+ prt_str(&buf, "\n ");
+ found_btree_node_to_text(&buf, c, n);
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return -BCH_ERR_fsck_repair_unimplemented;
+ }
+ }
+
+ return 0;
+}
+
+int bch2_scan_for_btree_nodes(struct bch_fs *c)
+{
+ struct find_btree_nodes *f = &c->found_btree_nodes;
+ struct printbuf buf = PRINTBUF;
+ size_t dst;
+ int ret = 0;
+
+ if (f->nodes.nr)
+ return 0;
+
+ mutex_init(&f->lock);
+
+ ret = read_btree_nodes(f);
+ if (ret)
+ return ret;
+
+ if (!f->nodes.nr) {
+ bch_err(c, "%s: no btree nodes found", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (0 && c->opts.verbose) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "%s: nodes found:\n", __func__);
+ found_btree_nodes_to_text(&buf, c, f->nodes);
+ bch2_print_string_as_lines(KERN_INFO, buf.buf);
+ }
+
+ sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
+
+ dst = 0;
+ darray_for_each(f->nodes, i) {
+ struct found_btree_node *prev = dst ? f->nodes.data + dst - 1 : NULL;
+
+ if (prev &&
+ prev->cookie == i->cookie) {
+ if (prev->nr_ptrs == ARRAY_SIZE(prev->ptrs)) {
+ bch_err(c, "%s: found too many replicas for btree node", __func__);
+ ret = -EINVAL;
+ goto err;
+ }
+ prev->ptrs[prev->nr_ptrs++] = i->ptrs[0];
+ } else {
+ f->nodes.data[dst++] = *i;
+ }
+ }
+ f->nodes.nr = dst;
+
+ sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
+
+ if (0 && c->opts.verbose) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "%s: nodes after merging replicas:\n", __func__);
+ found_btree_nodes_to_text(&buf, c, f->nodes);
+ bch2_print_string_as_lines(KERN_INFO, buf.buf);
+ }
+
+ dst = 0;
+ darray_for_each(f->nodes, i) {
+ if (i->overwritten)
+ continue;
+
+ ret = handle_overwrites(c, i, &darray_top(f->nodes));
+ if (ret)
+ goto err;
+
+ BUG_ON(i->overwritten);
+ f->nodes.data[dst++] = *i;
+ }
+ f->nodes.nr = dst;
+
+ if (c->opts.verbose) {
+ printbuf_reset(&buf);
+ prt_printf(&buf, "%s: nodes found after overwrites:\n", __func__);
+ found_btree_nodes_to_text(&buf, c, f->nodes);
+ bch2_print_string_as_lines(KERN_INFO, buf.buf);
+ }
+
+ eytzinger0_sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
+err:
+ printbuf_exit(&buf);
+ return ret;
+}
+
+static int found_btree_node_range_start_cmp(const void *_l, const void *_r)
+{
+ const struct found_btree_node *l = _l;
+ const struct found_btree_node *r = _r;
+
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ -cmp_int(l->level, r->level) ?:
+ bpos_cmp(l->max_key, r->min_key);
+}
+
+#define for_each_found_btree_node_in_range(_f, _search, _idx) \
+ for (size_t _idx = eytzinger0_find_gt((_f)->nodes.data, (_f)->nodes.nr, \
+ sizeof((_f)->nodes.data[0]), \
found_btree_node_range_start_cmp, &(_search)); \
+ _idx < (_f)->nodes.nr && \
+ (_f)->nodes.data[_idx].btree_id == _search.btree_id && \
+ (_f)->nodes.data[_idx].level == _search.level && \
+ bpos_lt((_f)->nodes.data[_idx].min_key, _search.max_key); \
+ _idx = eytzinger0_next(_idx, (_f)->nodes.nr))
+
+bool bch2_btree_node_is_stale(struct bch_fs *c, struct btree *b)
+{
+ struct find_btree_nodes *f = &c->found_btree_nodes;
+
+ struct found_btree_node search = {
+ .btree_id = b->c.btree_id,
+ .level = b->c.level,
+ .min_key = b->data->min_key,
+ .max_key = b->key.k.p,
+ };
+
+ for_each_found_btree_node_in_range(f, search, idx)
+ if (f->nodes.data[idx].seq > BTREE_NODE_SEQ(b->data))
+ return true;
+ return false;
+}
+
+bool bch2_btree_has_scanned_nodes(struct bch_fs *c, enum btree_id btree)
+{
+ struct found_btree_node search = {
+ .btree_id = btree,
+ .level = 0,
+ .min_key = POS_MIN,
+ .max_key = SPOS_MAX,
+ };
+
+ for_each_found_btree_node_in_range(&c->found_btree_nodes, search, idx)
+ return true;
+ return false;
+}
+
+int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
+ unsigned level, struct bpos node_min, struct bpos node_max)
+{
+ if (btree_id_is_alloc(btree))
+ return 0;
+
+ struct find_btree_nodes *f = &c->found_btree_nodes;
+
+ int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ if (ret)
+ return ret;
+
+ if (c->opts.verbose) {
+ struct printbuf buf = PRINTBUF;
+
+ prt_printf(&buf, "recovering %s l=%u ", bch2_btree_id_str(btree), level);
+ bch2_bpos_to_text(&buf, node_min);
+ prt_str(&buf, " - ");
+ bch2_bpos_to_text(&buf, node_max);
+
+ bch_info(c, "%s(): %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+ }
+
+ struct found_btree_node search = {
+ .btree_id = btree,
+ .level = level,
+ .min_key = node_min,
+ .max_key = node_max,
+ };
+
+ for_each_found_btree_node_in_range(f, search, idx) {
+ struct found_btree_node n = f->nodes.data[idx];
+
+ n.range_updated |= bpos_lt(n.min_key, node_min);
+ n.min_key = bpos_max(n.min_key, node_min);
+
+ n.range_updated |= bpos_gt(n.max_key, node_max);
+ n.max_key = bpos_min(n.max_key, node_max);
+
+ struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } tmp;
+
+ found_btree_node_to_key(&tmp.k, &n);
+
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&tmp.k));
+ bch_verbose(c, "%s(): recovering %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+
+ BUG_ON(bch2_bkey_invalid(c, bkey_i_to_s_c(&tmp.k), BKEY_TYPE_btree, 0, NULL));
+
+ ret = bch2_journal_key_insert(c, btree, level + 1, &tmp.k);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void bch2_find_btree_nodes_exit(struct find_btree_nodes *f)
+{
+ darray_exit(&f->nodes);
+}
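
handle_overwrites() above resolves overlapping found nodes by sequence number: with nodes sorted by start key (newest first on ties), a newer node either fully shadows an older overlapping one or clips its range, and a clipped node is bubble_up()'d to restore sort order before rescanning. A reduced integer model of that resolution step, with invented names:

#include <stdbool.h>

struct range { int start, end, seq; bool overwritten; };

static void resolve_overlaps(struct range *cur, struct range *end)
{
	for (struct range *n = cur + 1;
	     n < end && n->start <= cur->end && !n->overwritten;
	     n++) {
		if (cur->seq > n->seq) {
			if (cur->end >= n->end)
				n->overwritten = true;	/* fully shadowed */
			else
				/* Clip its front; the real code then
				 * bubble_up()s n and rescans, since
				 * changing the start key can break the
				 * sort order */
				n->start = cur->end + 1;
		} else if (cur->seq < n->seq) {
			cur->end = n->start - 1;	/* clip our tail */
		}
		/* Equal seq with overlap is unrepairable: the kernel
		 * code returns -BCH_ERR_fsck_repair_unimplemented */
	}
}
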
diff --git a/fs/bcachefs/btree_node_scan.h b/fs/bcachefs/btree_node_scan.h
new file mode 100644
index 000000000000..08687b209787
--- /dev/null
+++ b/fs/bcachefs/btree_node_scan.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_NODE_SCAN_H
+#define _BCACHEFS_BTREE_NODE_SCAN_H
+
+int bch2_scan_for_btree_nodes(struct bch_fs *);
+bool bch2_btree_node_is_stale(struct bch_fs *, struct btree *);
+bool bch2_btree_has_scanned_nodes(struct bch_fs *, enum btree_id);
+int bch2_get_scanned_nodes(struct bch_fs *, enum btree_id, unsigned, struct bpos, struct bpos);
+void bch2_find_btree_nodes_exit(struct find_btree_nodes *);
+
+#endif /* _BCACHEFS_BTREE_NODE_SCAN_H */
diff --git a/fs/bcachefs/btree_node_scan_types.h b/fs/bcachefs/btree_node_scan_types.h
new file mode 100644
index 000000000000..5cfaeb5ac831
--- /dev/null
+++ b/fs/bcachefs/btree_node_scan_types.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BTREE_NODE_SCAN_TYPES_H
+#define _BCACHEFS_BTREE_NODE_SCAN_TYPES_H
+
+#include "darray.h"
+
+struct found_btree_node {
+ bool range_updated:1;
+ bool overwritten:1;
+ u8 btree_id;
+ u8 level;
+ unsigned sectors_written;
+ u32 seq;
+ u64 cookie;
+
+ struct bpos min_key;
+ struct bpos max_key;
+
+ unsigned nr_ptrs;
+ struct bch_extent_ptr ptrs[BCH_REPLICAS_MAX];
+};
+
+typedef DARRAY(struct found_btree_node) found_btree_nodes;
+
+struct find_btree_nodes {
+ int ret;
+ struct mutex lock;
+ found_btree_nodes nodes;
+};
+
+#endif /* _BCACHEFS_BTREE_NODE_SCAN_TYPES_H */
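
The DARRAY() typedef used here comes from bcachefs's darray.h: a macro that stamps out a type-safe growable array, pushed by pointer and iterated by value (matching the darray_push(&f->nodes, n) and darray_for_each(*keys, i) call sites above). A minimal sketch of the idea, not the kernel implementation:

#include <stdlib.h>

#define DARRAY(type)							\
struct {								\
	type	*data;							\
	size_t	nr, size;						\
}

#define darray_push(d, item)						\
({									\
	int _ret = 0;							\
	if ((d)->nr == (d)->size) {					\
		size_t _new = (d)->size ? (d)->size * 2 : 8;		\
		void *_p = realloc((d)->data, _new * sizeof(*(d)->data)); \
		if (_p) {						\
			(d)->data = _p;					\
			(d)->size = _new;				\
		} else {						\
			_ret = -1;					\
		}							\
	}								\
	if (!_ret)							\
		(d)->data[(d)->nr++] = (item);				\
	_ret;								\
})

#define darray_for_each(d, i)						\
	for (typeof((d).data) i = (d).data; i < (d).data + (d).nr; i++)

typedef DARRAY(int) int_darray;
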
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index 30d69a6d133e..74e1ff225674 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "alloc_foreground.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
@@ -19,6 +20,26 @@
#include <linux/prefetch.h>
+static const char * const trans_commit_flags_strs[] = {
+#define x(n, ...) #n,
+ BCH_TRANS_COMMIT_FLAGS()
+#undef x
+ NULL
+};
+
+void bch2_trans_commit_flags_to_text(struct printbuf *out, enum bch_trans_commit_flags flags)
+{
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
+
+ prt_printf(out, "watermark=%s", bch2_watermarks[watermark]);
+
+ flags >>= BCH_WATERMARK_BITS;
+ if (flags) {
+ prt_char(out, ' ');
+ bch2_prt_bitflags(out, trans_commit_flags_strs, flags);
+ }
+}
+
static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
{
#ifdef CONFIG_BCACHEFS_DEBUG
@@ -315,10 +336,10 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
BUG_ON(i->btree_id != path->btree_id);
EBUG_ON(!i->level &&
btree_type_has_snapshots(i->btree_id) &&
- !(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
- test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
+ !(i->flags & BTREE_UPDATE_internal_snapshot_node) &&
+ test_bit(JOURNAL_replay_done, &trans->c->journal.flags) &&
i->k->k.p.snapshot &&
- bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot));
+ bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
}
static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
@@ -397,12 +418,13 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
struct bkey_cached *ck = (void *) path->l[0].b;
unsigned new_u64s;
struct bkey_i *new_k;
+ unsigned watermark = flags & BCH_WATERMARK_MASK;
EBUG_ON(path->level);
- if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
- bch2_btree_key_cache_must_wait(c) &&
- !(flags & BCH_TRANS_COMMIT_journal_reclaim))
+ if (watermark < BCH_WATERMARK_reclaim &&
+ !test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
+ bch2_btree_key_cache_must_wait(c))
return -BCH_ERR_btree_insert_need_journal_reclaim;
/*
@@ -442,13 +464,13 @@ static int run_one_mem_trigger(struct btree_trans *trans,
verify_update_old_key(trans, i);
- if (unlikely(flags & BTREE_TRIGGER_NORUN))
+ if (unlikely(flags & BTREE_TRIGGER_norun))
return 0;
if (old_ops->trigger == new_ops->trigger) {
ret = bch2_key_trigger(trans, i->btree_id, i->level,
old, bkey_i_to_s(new),
- BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
+ BTREE_TRIGGER_insert|BTREE_TRIGGER_overwrite|flags);
} else {
ret = bch2_key_trigger_new(trans, i->btree_id, i->level,
bkey_i_to_s(new), flags) ?:
@@ -471,11 +493,11 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
struct bkey_s_c old = { &old_k, i->old_v };
const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
- unsigned flags = i->flags|BTREE_TRIGGER_TRANSACTIONAL;
+ unsigned flags = i->flags|BTREE_TRIGGER_transactional;
verify_update_old_key(trans, i);
- if ((i->flags & BTREE_TRIGGER_NORUN) ||
+ if ((i->flags & BTREE_TRIGGER_norun) ||
!(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)))
return 0;
@@ -485,8 +507,8 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
i->overwrite_trigger_run = true;
i->insert_trigger_run = true;
return bch2_key_trigger(trans, i->btree_id, i->level, old, bkey_i_to_s(i->k),
- BTREE_TRIGGER_INSERT|
- BTREE_TRIGGER_OVERWRITE|flags) ?: 1;
+ BTREE_TRIGGER_insert|
+ BTREE_TRIGGER_overwrite|flags) ?: 1;
} else if (overwrite && !i->overwrite_trigger_run) {
i->overwrite_trigger_run = true;
return bch2_key_trigger_old(trans, i->btree_id, i->level, old, flags) ?: 1;
@@ -499,9 +521,8 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
}
static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
- struct btree_insert_entry *btree_id_start)
+ unsigned btree_id_start)
{
- struct btree_insert_entry *i;
bool trans_trigger_run;
int ret, overwrite;
@@ -514,13 +535,13 @@ static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
do {
trans_trigger_run = false;
- for (i = btree_id_start;
- i < trans->updates + trans->nr_updates && i->btree_id <= btree_id;
+ for (unsigned i = btree_id_start;
+ i < trans->nr_updates && trans->updates[i].btree_id <= btree_id;
i++) {
- if (i->btree_id != btree_id)
+ if (trans->updates[i].btree_id != btree_id)
continue;
- ret = run_one_trans_trigger(trans, i, overwrite);
+ ret = run_one_trans_trigger(trans, trans->updates + i, overwrite);
if (ret < 0)
return ret;
if (ret)
@@ -534,8 +555,7 @@ static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
{
- struct btree_insert_entry *btree_id_start = trans->updates;
- unsigned btree_id = 0;
+ unsigned btree_id = 0, btree_id_start = 0;
int ret = 0;
/*
@@ -549,8 +569,8 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
if (btree_id == BTREE_ID_alloc)
continue;
- while (btree_id_start < trans->updates + trans->nr_updates &&
- btree_id_start->btree_id < btree_id)
+ while (btree_id_start < trans->nr_updates &&
+ trans->updates[btree_id_start].btree_id < btree_id)
btree_id_start++;
ret = run_btree_triggers(trans, btree_id, btree_id_start);
@@ -558,11 +578,13 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
return ret;
}
- trans_for_each_update(trans, i) {
+ for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
+ struct btree_insert_entry *i = trans->updates + idx;
+
if (i->btree_id > BTREE_ID_alloc)
break;
if (i->btree_id == BTREE_ID_alloc) {
- ret = run_btree_triggers(trans, BTREE_ID_alloc, i);
+ ret = run_btree_triggers(trans, BTREE_ID_alloc, idx);
if (ret)
return ret;
break;
@@ -571,7 +593,7 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
#ifdef CONFIG_BCACHEFS_DEBUG
trans_for_each_update(trans, i)
- BUG_ON(!(i->flags & BTREE_TRIGGER_NORUN) &&
+ BUG_ON(!(i->flags & BTREE_TRIGGER_norun) &&
(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)) &&
(!i->insert_trigger_run || !i->overwrite_trigger_run));
#endif
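
The switch in run_btree_triggers() from struct btree_insert_entry pointers to array indices matters because transactional triggers may append new updates while the loop runs, and growing trans->updates can move the array: indices survive the move where cached pointers would dangle. A userspace sketch of the hazard, under that reading of the change:

/* Sketch of why the trigger loops switched from element pointers to
 * indices: appending mid-iteration may realloc the backing storage,
 * invalidating old pointers; indices stay valid across realloc. */
#include <stdio.h>
#include <stdlib.h>

struct entry { int btree_id; };

struct arr { struct entry *data; size_t nr, size; };

static int arr_push(struct arr *a, struct entry e)
{
	if (a->nr == a->size) {
		size_t new_size = a->size ? a->size * 2 : 4;
		struct entry *p = realloc(a->data, new_size * sizeof(*p));
		if (!p)
			return -1;
		a->data = p;	/* old a->data pointers are now invalid */
		a->size = new_size;
	}
	a->data[a->nr++] = e;
	return 0;
}

int main(void)
{
	struct arr a = { 0 };
	arr_push(&a, (struct entry) { 1 });

	/* UNSAFE: struct entry *i = &a.data[0]; arr_push(...); *i is UB */

	/* SAFE: hold an index, re-derive the pointer each iteration */
	for (size_t i = 0; i < a.nr; i++) {
		struct entry *e = &a.data[i];

		if (e->btree_id == 1 && a.nr < 8)
			arr_push(&a, (struct entry) { 2 }); /* append mid-loop */
	}

	printf("%zu entries\n", a.nr);
	free(a.data);
	return 0;
}
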
@@ -589,7 +611,7 @@ static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
if (btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)) &&
gc_visited(trans->c, gc_pos_btree_node(insert_l(trans, i)->b))) {
- int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
+ int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_gc);
if (ret)
return ret;
}
@@ -608,6 +630,9 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
unsigned u64s = 0;
int ret;
+ bch2_trans_verify_not_unlocked(trans);
+ bch2_trans_verify_not_in_restart(trans);
+
if (race_fault()) {
trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
@@ -685,7 +710,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
trans_for_each_update(trans, i)
if (BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS & (1U << i->bkey_type)) {
- ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_ATOMIC|i->flags);
+ ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_atomic|i->flags);
if (ret)
goto fatal_err;
}
@@ -704,7 +729,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
if (i->key_cache_already_flushed)
continue;
- if (i->flags & BTREE_UPDATE_NOJOURNAL)
+ if (i->flags & BTREE_UPDATE_nojournal)
continue;
verify_update_old_key(trans, i);
@@ -765,16 +790,15 @@ static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans
}
static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct btree_insert_entry *i,
struct printbuf *err)
{
struct bch_fs *c = trans->c;
printbuf_reset(err);
- prt_printf(err, "invalid bkey on insert from %s -> %ps",
+ prt_printf(err, "invalid bkey on insert from %s -> %ps\n",
trans->fn, (void *) i->ip_allocated);
- prt_newline(err);
printbuf_indent_add(err, 2);
bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
@@ -795,8 +819,7 @@ static noinline int bch2_trans_commit_journal_entry_invalid(struct btree_trans *
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
- prt_printf(&buf, "invalid bkey on insert from %s", trans->fn);
- prt_newline(&buf);
+ prt_printf(&buf, "invalid bkey on insert from %s\n", trans->fn);
printbuf_indent_add(&buf, 2);
bch2_journal_entry_to_text(&buf, c, i);
@@ -826,7 +849,8 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
struct bch_fs *c = trans->c;
int ret = 0, u64s_delta = 0;
- trans_for_each_update(trans, i) {
+ for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
+ struct btree_insert_entry *i = trans->updates + idx;
if (i->cached)
continue;
@@ -887,6 +911,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
int ret, unsigned long trace_ip)
{
struct bch_fs *c = trans->c;
+ enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
switch (ret) {
case -BCH_ERR_btree_insert_btree_node_full:
@@ -905,7 +930,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
* flag
*/
if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
- (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim) {
+ watermark < BCH_WATERMARK_reclaim) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
break;
}
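
Several sites in this patch replace equality tests against BCH_WATERMARK_reclaim with ordered comparisons: watermarks form an enum ordered by urgency, so "watermark < BCH_WATERMARK_reclaim" reads as "less urgent than reclaim" and automatically covers the new interior_updates watermark that sorts above reclaim. A minimal sketch, with illustrative names and ordering:

/* Sketch of ordered-watermark comparisons, assuming the enum is declared
 * in increasing order of urgency (names and ordering illustrative): */
#include <stdio.h>

enum watermark {
	WATERMARK_stripe,
	WATERMARK_normal,
	WATERMARK_copygc,
	WATERMARK_btree,
	WATERMARK_reclaim,
	WATERMARK_interior_updates,
};

/* "may deadlock against journal reclaim" is any watermark below reclaim;
 * an equality test against reclaim would wrongly flag interior_updates */
static int may_deadlock(enum watermark w)
{
	return w < WATERMARK_reclaim;
}

int main(void)
{
	printf("btree may deadlock: %d\n", may_deadlock(WATERMARK_btree));
	printf("interior_updates may deadlock: %d\n",
	       may_deadlock(WATERMARK_interior_updates));
	return 0;
}
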
@@ -985,6 +1010,9 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
struct bch_fs *c = trans->c;
int ret = 0;
+ bch2_trans_verify_not_unlocked(trans);
+ bch2_trans_verify_not_in_restart(trans);
+
if (!trans->nr_updates &&
!trans->journal_entries_u64s)
goto out_reset;
@@ -997,10 +1025,10 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
trans_for_each_update(trans, i) {
struct printbuf buf = PRINTBUF;
- enum bkey_invalid_flags invalid_flags = 0;
+ enum bch_validate_flags invalid_flags = 0;
if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
- invalid_flags |= BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT;
+ invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
i->bkey_type, invalid_flags, &buf)))
@@ -1015,10 +1043,10 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
for (struct jset_entry *i = trans->journal_entries;
i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
i = vstruct_next(i)) {
- enum bkey_invalid_flags invalid_flags = 0;
+ enum bch_validate_flags invalid_flags = 0;
if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
- invalid_flags |= BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT;
+ invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
if (unlikely(bch2_journal_entry_validate(c, NULL, i,
bcachefs_metadata_version_current,
@@ -1062,7 +1090,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
if (i->key_cache_already_flushed)
continue;
- if (i->flags & BTREE_UPDATE_NOJOURNAL)
+ if (i->flags & BTREE_UPDATE_nojournal)
continue;
/* we're going to journal the key being updated: */
@@ -1083,6 +1111,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
}
retry:
errored_at = NULL;
+ bch2_trans_verify_not_unlocked(trans);
bch2_trans_verify_not_in_restart(trans);
if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 9404d96c38f3..d63db4fefe73 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -163,9 +163,21 @@ struct btree_cache {
/* Number of elements in live + freeable lists */
unsigned used;
unsigned reserve;
+ unsigned freed;
+ unsigned not_freed_lock_intent;
+ unsigned not_freed_lock_write;
+ unsigned not_freed_dirty;
+ unsigned not_freed_read_in_flight;
+ unsigned not_freed_write_in_flight;
+ unsigned not_freed_noevict;
+ unsigned not_freed_write_blocked;
+ unsigned not_freed_will_make_reachable;
+ unsigned not_freed_access_bit;
atomic_t dirty;
struct shrinker *shrink;
+ unsigned used_by_btree[BTREE_ID_NR];
+
/*
* If we need to allocate memory for a new btree node and that
* allocation fails, we can cannibalize another node in the btree cache
@@ -187,36 +199,89 @@ struct btree_node_iter {
} data[MAX_BSETS];
};
+#define BTREE_ITER_FLAGS() \
+ x(slots) \
+ x(intent) \
+ x(prefetch) \
+ x(is_extents) \
+ x(not_extents) \
+ x(cached) \
+ x(with_key_cache) \
+ x(with_updates) \
+ x(with_journal) \
+ x(snapshot_field) \
+ x(all_snapshots) \
+ x(filter_snapshots) \
+ x(nopreserve) \
+ x(cached_nofill) \
+ x(key_cache_fill) \
+
+#define STR_HASH_FLAGS() \
+ x(must_create) \
+ x(must_replace)
+
+#define BTREE_UPDATE_FLAGS() \
+ x(internal_snapshot_node) \
+ x(nojournal) \
+ x(key_cache_reclaim)
+
+
/*
- * Iterate over all possible positions, synthesizing deleted keys for holes:
- */
-static const __maybe_unused u16 BTREE_ITER_SLOTS = 1 << 0;
-/*
- * Indicates that intent locks should be taken on leaf nodes, because we expect
- * to be doing updates:
- */
-static const __maybe_unused u16 BTREE_ITER_INTENT = 1 << 1;
-/*
- * Causes the btree iterator code to prefetch additional btree nodes from disk:
- */
-static const __maybe_unused u16 BTREE_ITER_PREFETCH = 1 << 2;
-/*
- * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
- * @pos or the first key strictly greater than @pos
+ * BTREE_TRIGGER_norun - don't run triggers at all
+ *
+ * BTREE_TRIGGER_transactional - we're running transactional triggers as part of
+ * a transaction commit: triggers may generate new updates
+ *
+ * BTREE_TRIGGER_atomic - we're running atomic triggers during a transaction
+ * commit: we have our journal reservation, we're holding btree node write
+ * locks, and we know the transaction is going to commit (returning an error
+ * here is a fatal error, causing us to go emergency read-only)
+ *
+ * BTREE_TRIGGER_gc - we're in gc/fsck: running triggers to recalculate e.g. disk usage
+ *
+ * BTREE_TRIGGER_insert - @new is entering the btree
+ * BTREE_TRIGGER_overwrite - @old is leaving the btree
+ *
+ * BTREE_TRIGGER_bucket_invalidate - signal from bucket invalidate path to alloc
+ * trigger
*/
-static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS = 1 << 3;
-static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS = 1 << 4;
-static const __maybe_unused u16 BTREE_ITER_CACHED = 1 << 5;
-static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE = 1 << 6;
-static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES = 1 << 7;
-static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL = 1 << 8;
-static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS = 1 << 9;
-static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS = 1 << 10;
-static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS = 1 << 11;
-static const __maybe_unused u16 BTREE_ITER_NOPRESERVE = 1 << 12;
-static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL = 1 << 13;
-static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL = 1 << 14;
-#define __BTREE_ITER_FLAGS_END 15
+#define BTREE_TRIGGER_FLAGS() \
+ x(norun) \
+ x(transactional) \
+ x(atomic) \
+ x(check_repair) \
+ x(gc) \
+ x(insert) \
+ x(overwrite) \
+ x(is_root) \
+ x(bucket_invalidate)
+
+enum {
+#define x(n) BTREE_ITER_FLAG_BIT_##n,
+ BTREE_ITER_FLAGS()
+ STR_HASH_FLAGS()
+ BTREE_UPDATE_FLAGS()
+ BTREE_TRIGGER_FLAGS()
+#undef x
+};
+
+/* iter flags must fit in a u16: */
+static_assert(BTREE_ITER_FLAG_BIT_key_cache_fill <= 15);
+
+enum btree_iter_update_trigger_flags {
+#define x(n) BTREE_ITER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
+ BTREE_ITER_FLAGS()
+#undef x
+#define x(n) STR_HASH_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
+ STR_HASH_FLAGS()
+#undef x
+#define x(n) BTREE_UPDATE_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
+ BTREE_UPDATE_FLAGS()
+#undef x
+#define x(n) BTREE_TRIGGER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
+ BTREE_TRIGGER_FLAGS()
+#undef x
+};
enum btree_path_uptodate {
BTREE_ITER_UPTODATE = 0,
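
The enums above replace fifteen hand-numbered u16 constants with x-macro lists sharing one bit namespace: a first anonymous enum concatenates all four lists to assign sequential bit numbers, then each list is re-expanded into mask constants. A compilable toy version of the trick:

/* Sketch of the shared-bit-space pattern: several x-macro lists are
 * concatenated into one bit-number enum, then each list gets mask
 * constants derived from those bit numbers. Toy flag names. */
#include <stdio.h>

#define ITER_FLAGS()	x(slots) x(intent)
#define UPDATE_FLAGS()	x(nojournal)

enum {
#define x(n) FLAG_BIT_##n,
	ITER_FLAGS()
	UPDATE_FLAGS()
#undef x
};

enum toy_flags {
#define x(n) ITER_##n = 1U << FLAG_BIT_##n,
	ITER_FLAGS()
#undef x
#define x(n) UPDATE_##n = 1U << FLAG_BIT_##n,
	UPDATE_FLAGS()
#undef x
};

int main(void)
{
	/* bits are allocated sequentially across both lists, no collisions */
	printf("ITER_slots=%x ITER_intent=%x UPDATE_nojournal=%x\n",
	       ITER_slots, ITER_intent, UPDATE_nojournal);
	return 0;
}
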
@@ -307,7 +372,7 @@ struct btree_iter {
*/
struct bkey k;
- /* BTREE_ITER_WITH_JOURNAL: */
+ /* BTREE_ITER_with_journal: */
size_t journal_idx;
#ifdef TRACK_PATH_ALLOCATED
unsigned long ip_allocated;
@@ -321,9 +386,9 @@ struct bkey_cached {
struct btree_bkey_cached_common c;
unsigned long flags;
+ unsigned long btree_trans_barrier_seq;
u16 u64s;
bool valid;
- u32 btree_trans_barrier_seq;
struct bkey_cached_key key;
struct rhash_head hash;
@@ -364,7 +429,21 @@ struct btree_insert_entry {
unsigned long ip_allocated;
};
+/* Number of btree paths we preallocate, usually enough */
#define BTREE_ITER_INITIAL 64
+/*
+ * Limit for btree_trans_too_many_iters(); this is enough that almost all code
+ * paths should run inside this limit, and if they don't it usually indicates a
+ * bug (leaking/duplicated btree paths).
+ *
+ * exception: some fsck paths
+ *
+ * Bugs with excessive path usage appear to have been eliminated now, so we
+ * might consider removing this (and btree_trans_too_many_iters()) at some
+ * point.
+ */
+#define BTREE_ITER_NORMAL_LIMIT 256
+/* never exceed limit */
#define BTREE_ITER_MAX (1U << 10)
struct btree_trans_commit_hook;
@@ -404,6 +483,8 @@ struct btree_trans {
u8 lock_must_abort;
bool lock_may_not_fail:1;
bool srcu_held:1;
+ bool locked:1;
+ bool write_locked:1;
bool used_mempool:1;
bool in_traverse_all:1;
bool paths_sorted:1;
@@ -411,13 +492,13 @@ struct btree_trans {
bool journal_transaction_names:1;
bool journal_replay_not_finished:1;
bool notrace_relock_fail:1;
- bool write_locked:1;
enum bch_errcode restarted:16;
u32 restart_count;
u64 last_begin_time;
unsigned long last_begin_ip;
unsigned long last_restarted_ip;
+ unsigned long last_unlock_ip;
unsigned long srcu_lock_time;
const char *fn;
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index a4b40c1656a5..f3c645a43dcb 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -25,19 +25,22 @@ static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
- struct bkey_i *, enum btree_update_flags,
+ struct bkey_i *, enum btree_iter_update_trigger_flags,
unsigned long ip);
static noinline int extent_front_merge(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
struct bkey_i **insert,
- enum btree_update_flags flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct bkey_i *update;
int ret;
+ if (unlikely(trans->journal_replay_not_finished))
+ return 0;
+
update = bch2_bkey_make_mut_noupdate(trans, k);
ret = PTR_ERR_OR_ZERO(update);
if (ret)
@@ -69,6 +72,9 @@ static noinline int extent_back_merge(struct btree_trans *trans,
struct bch_fs *c = trans->c;
int ret;
+ if (unlikely(trans->journal_replay_not_finished))
+ return 0;
+
ret = bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
if (ret < 0)
@@ -98,8 +104,8 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
pos.snapshot++;
for_each_btree_key_norestart(trans, iter, btree_id, pos,
- BTREE_ITER_ALL_SNAPSHOTS|
- BTREE_ITER_NOPRESERVE, k, ret) {
+ BTREE_ITER_all_snapshots|
+ BTREE_ITER_nopreserve, k, ret) {
if (!bkey_eq(k.k->p, pos))
break;
@@ -132,8 +138,8 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
darray_init(&s);
bch2_trans_iter_init(trans, &old_iter, id, old_pos,
- BTREE_ITER_NOT_EXTENTS|
- BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_not_extents|
+ BTREE_ITER_all_snapshots);
while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
!(ret = bkey_err(old_k)) &&
bkey_eq(old_pos, old_k.k->p)) {
@@ -145,8 +151,8 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
continue;
new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
- BTREE_ITER_NOT_EXTENTS|
- BTREE_ITER_INTENT);
+ BTREE_ITER_not_extents|
+ BTREE_ITER_intent);
ret = bkey_err(new_k);
if (ret)
break;
@@ -162,7 +168,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
update->k.type = KEY_TYPE_whiteout;
ret = bch2_trans_update(trans, &new_iter, update,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
}
bch2_trans_iter_exit(trans, &new_iter);
@@ -179,7 +185,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
struct btree_iter *iter,
- enum btree_update_flags flags,
+ enum btree_iter_update_trigger_flags flags,
struct bkey_s_c old,
struct bkey_s_c new)
{
@@ -212,7 +218,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
old.k->p, update->k.p) ?:
bch2_btree_insert_nonextent(trans, btree_id, update,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+ BTREE_UPDATE_internal_snapshot_node|flags);
if (ret)
return ret;
}
@@ -229,7 +235,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
old.k->p, update->k.p) ?:
bch2_btree_insert_nonextent(trans, btree_id, update,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+ BTREE_UPDATE_internal_snapshot_node|flags);
if (ret)
return ret;
}
@@ -254,7 +260,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
}
ret = bch2_btree_insert_nonextent(trans, btree_id, update,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+ BTREE_UPDATE_internal_snapshot_node|flags);
if (ret)
return ret;
}
@@ -267,7 +273,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
bch2_cut_front(new.k->p, update);
ret = bch2_trans_update_by_path(trans, iter->path, update,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ BTREE_UPDATE_internal_snapshot_node|
flags, _RET_IP_);
if (ret)
return ret;
@@ -279,7 +285,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
static int bch2_trans_update_extent(struct btree_trans *trans,
struct btree_iter *orig_iter,
struct bkey_i *insert,
- enum btree_update_flags flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -287,9 +293,9 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
int ret = 0;
bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
- BTREE_ITER_INTENT|
- BTREE_ITER_WITH_UPDATES|
- BTREE_ITER_NOT_EXTENTS);
+ BTREE_ITER_intent|
+ BTREE_ITER_with_updates|
+ BTREE_ITER_not_extents);
k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
goto err;
@@ -340,7 +346,7 @@ err:
static noinline int flush_new_cached_update(struct btree_trans *trans,
struct btree_insert_entry *i,
- enum btree_update_flags flags,
+ enum btree_iter_update_trigger_flags flags,
unsigned long ip)
{
struct bkey k;
@@ -348,7 +354,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
btree_path_idx_t path_idx =
bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
- BTREE_ITER_INTENT, _THIS_IP_);
+ BTREE_ITER_intent, _THIS_IP_);
ret = bch2_btree_path_traverse(trans, path_idx, 0);
if (ret)
goto out;
@@ -366,7 +372,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
goto out;
i->key_cache_already_flushed = true;
- i->flags |= BTREE_TRIGGER_NORUN;
+ i->flags |= BTREE_TRIGGER_norun;
btree_path_set_should_be_locked(btree_path);
ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
@@ -377,7 +383,7 @@ out:
static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
- struct bkey_i *k, enum btree_update_flags flags,
+ struct bkey_i *k, enum btree_iter_update_trigger_flags flags,
unsigned long ip)
{
struct bch_fs *c = trans->c;
@@ -473,15 +479,15 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
if (!iter->key_cache_path)
iter->key_cache_path =
bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
- BTREE_ITER_INTENT|
- BTREE_ITER_CACHED, _THIS_IP_);
+ BTREE_ITER_intent|
+ BTREE_ITER_cached, _THIS_IP_);
iter->key_cache_path =
bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
- iter->flags & BTREE_ITER_INTENT,
+ iter->flags & BTREE_ITER_intent,
_THIS_IP_);
- ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_CACHED);
+ ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_cached);
if (unlikely(ret))
return ret;
@@ -499,17 +505,17 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
}
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
- struct bkey_i *k, enum btree_update_flags flags)
+ struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
btree_path_idx_t path_idx = iter->update_path ?: iter->path;
int ret;
- if (iter->flags & BTREE_ITER_IS_EXTENTS)
+ if (iter->flags & BTREE_ITER_is_extents)
return bch2_trans_update_extent(trans, iter, k, flags);
if (bkey_deleted(&k->k) &&
- !(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
- (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
+ !(flags & BTREE_UPDATE_key_cache_reclaim) &&
+ (iter->flags & BTREE_ITER_filter_snapshots)) {
ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
if (unlikely(ret < 0))
return ret;
@@ -522,7 +528,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
* Ensure that updates to cached btrees go to the key cache:
*/
struct btree_path *path = trans->paths + path_idx;
- if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
+ if (!(flags & BTREE_UPDATE_key_cache_reclaim) &&
!path->cached &&
!path->level &&
btree_id_cached(trans->c, path->btree_id)) {
@@ -581,7 +587,7 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k;
int ret = 0;
- bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_INTENT);
+ bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_intent);
k = bch2_btree_iter_prev(iter);
ret = bkey_err(k);
if (ret)
@@ -615,15 +621,15 @@ void bch2_trans_commit_hook(struct btree_trans *trans,
int bch2_btree_insert_nonextent(struct btree_trans *trans,
enum btree_id btree, struct bkey_i *k,
- enum btree_update_flags flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct btree_iter iter;
int ret;
bch2_trans_iter_init(trans, &iter, btree, k->k.p,
- BTREE_ITER_CACHED|
- BTREE_ITER_NOT_EXTENTS|
- BTREE_ITER_INTENT);
+ BTREE_ITER_cached|
+ BTREE_ITER_not_extents|
+ BTREE_ITER_intent);
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
@@ -631,16 +637,13 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans,
}
int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
- struct bkey_i *k, enum btree_update_flags flags)
+ struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
struct btree_iter iter;
- int ret;
-
bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
- BTREE_ITER_CACHED|
- BTREE_ITER_INTENT);
- ret = bch2_btree_iter_traverse(&iter) ?:
- bch2_trans_update(trans, &iter, k, flags);
+ BTREE_ITER_intent|flags);
+ int ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -692,8 +695,8 @@ int bch2_btree_delete(struct btree_trans *trans,
int ret;
bch2_trans_iter_init(trans, &iter, btree, pos,
- BTREE_ITER_CACHED|
- BTREE_ITER_INTENT);
+ BTREE_ITER_cached|
+ BTREE_ITER_intent);
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(trans, &iter, update_flags);
bch2_trans_iter_exit(trans, &iter);
@@ -711,7 +714,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
struct bkey_s_c k;
int ret = 0;
- bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
+ bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(trans->c, 0);
@@ -739,7 +742,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
*/
delete.k.p = iter.pos;
- if (iter.flags & BTREE_ITER_IS_EXTENTS)
+ if (iter.flags & BTREE_ITER_is_extents)
bch2_key_resize(&delete.k,
bpos_min(end, k.k->p).offset -
iter.pos.offset);
@@ -798,7 +801,7 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
k->k.p = pos;
struct btree_iter iter;
- bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
+ bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, k, 0);
@@ -846,7 +849,7 @@ __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
if (ret)
goto err;
- if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
+ if (!test_bit(JOURNAL_running, &c->journal.flags)) {
ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
if (ret)
goto err;
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index cc7c53e83f89..b4894e4d5447 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -44,16 +44,18 @@ enum bch_trans_commit_flags {
#undef x
};
+void bch2_trans_commit_flags_to_text(struct printbuf *, enum bch_trans_commit_flags);
+
int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *,
unsigned, unsigned);
int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned);
int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
- struct bkey_i *, enum btree_update_flags);
+ struct bkey_i *, enum btree_iter_update_trigger_flags);
int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
- enum btree_update_flags);
+ enum btree_iter_update_trigger_flags);
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
struct disk_reservation *, int flags);
@@ -94,14 +96,14 @@ static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
}
int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *,
- enum btree_update_flags,
+ enum btree_iter_update_trigger_flags,
struct bkey_s_c, struct bkey_s_c);
int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos);
int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
- struct bkey_i *, enum btree_update_flags);
+ struct bkey_i *, enum btree_iter_update_trigger_flags);
struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *, unsigned);
@@ -276,7 +278,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr
unsigned flags, unsigned type, unsigned min_bytes)
{
struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
- btree_id, pos, flags|BTREE_ITER_INTENT, type);
+ btree_id, pos, flags|BTREE_ITER_intent, type);
struct bkey_i *ret = IS_ERR(k.k)
? ERR_CAST(k.k)
: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
@@ -299,7 +301,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
unsigned flags, unsigned type, unsigned min_bytes)
{
struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter,
- btree_id, pos, flags|BTREE_ITER_INTENT, type, min_bytes);
+ btree_id, pos, flags|BTREE_ITER_intent, type, min_bytes);
int ret;
if (IS_ERR(mut))
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index b2f5f2e50f7e..60b8544cea48 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "alloc_foreground.h"
+#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
@@ -18,82 +19,140 @@
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
+#include "recovery_passes.h"
#include "replicas.h"
+#include "sb-members.h"
#include "super-io.h"
#include "trace.h"
#include <linux/random.h>
+static const char * const bch2_btree_update_modes[] = {
+#define x(t) #t,
+ BTREE_UPDATE_MODES()
+#undef x
+ NULL
+};
+
static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
btree_path_idx_t, struct btree *, struct keylist *);
static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
-static btree_path_idx_t get_unlocked_mut_path(struct btree_trans *trans,
- enum btree_id btree_id,
- unsigned level,
- struct bpos pos)
-{
- btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
- BTREE_ITER_NOPRESERVE|
- BTREE_ITER_INTENT, _RET_IP_);
- path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);
-
- struct btree_path *path = trans->paths + path_idx;
- bch2_btree_path_downgrade(trans, path);
- __bch2_btree_path_unlock(trans, path);
- return path_idx;
-}
-
-/* Debug code: */
-
/*
* Verify that child nodes correctly span parent node's range:
*/
-static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
+int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
- struct bpos next_node = b->data->min_key;
- struct btree_node_iter iter;
+ struct bch_fs *c = trans->c;
+ struct bpos node_min = b->key.k.type == KEY_TYPE_btree_ptr_v2
+ ? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key
+ : b->data->min_key;
+ struct btree_and_journal_iter iter;
struct bkey_s_c k;
- struct bkey_s_c_btree_ptr_v2 bp;
- struct bkey unpacked;
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
+ struct printbuf buf = PRINTBUF;
+ struct bkey_buf prev;
+ int ret = 0;
- BUG_ON(!b->c.level);
+ BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+ !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
+ b->data->min_key));
+
+ if (b == btree_node_root(c, b)) {
+ if (!bpos_eq(b->data->min_key, POS_MIN)) {
+ printbuf_reset(&buf);
+ bch2_bpos_to_text(&buf, b->data->min_key);
+ need_fsck_err(c, btree_root_bad_min_key,
+ "btree root with incorrect min_key: %s", buf.buf);
+ goto topology_repair;
+ }
- if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
- return;
+ if (!bpos_eq(b->data->max_key, SPOS_MAX)) {
+ printbuf_reset(&buf);
+ bch2_bpos_to_text(&buf, b->data->max_key);
+ need_fsck_err(c, btree_root_bad_max_key,
+ "btree root with incorrect max_key: %s", buf.buf);
+ goto topology_repair;
+ }
+ }
+
+ if (!b->c.level)
+ return 0;
- bch2_btree_node_iter_init_from_start(&iter, b);
+ bch2_bkey_buf_init(&prev);
+ bkey_init(&prev.k->k);
+ bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
- while (1) {
- k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked);
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
if (k.k->type != KEY_TYPE_btree_ptr_v2)
- break;
- bp = bkey_s_c_to_btree_ptr_v2(k);
+ goto out;
- if (!bpos_eq(next_node, bp.v->min_key)) {
- bch2_dump_btree_node(c, b);
- bch2_bpos_to_text(&buf1, next_node);
- bch2_bpos_to_text(&buf2, bp.v->min_key);
- panic("expected next min_key %s got %s\n", buf1.buf, buf2.buf);
- }
+ struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
- bch2_btree_node_iter_advance(&iter, b);
+ struct bpos expected_min = bkey_deleted(&prev.k->k)
+ ? node_min
+ : bpos_successor(prev.k->k.p);
- if (bch2_btree_node_iter_end(&iter)) {
- if (!bpos_eq(k.k->p, b->key.k.p)) {
- bch2_dump_btree_node(c, b);
- bch2_bpos_to_text(&buf1, b->key.k.p);
- bch2_bpos_to_text(&buf2, k.k->p);
- panic("expected end %s got %s\n", buf1.buf, buf2.buf);
- }
- break;
+ if (!bpos_eq(expected_min, bp.v->min_key)) {
+ bch2_topology_error(c);
+
+ printbuf_reset(&buf);
+			prt_str(&buf, "end of prev node doesn't match start of next node\n");
+ prt_printf(&buf, " in btree %s level %u node ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ prt_str(&buf, "\n prev ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
+ prt_str(&buf, "\n next ");
+ bch2_bkey_val_to_text(&buf, c, k);
+
+ need_fsck_err(c, btree_node_topology_bad_min_key, "%s", buf.buf);
+ goto topology_repair;
}
- next_node = bpos_successor(k.k->p);
+ bch2_bkey_buf_reassemble(&prev, c, k);
+ bch2_btree_and_journal_iter_advance(&iter);
+ }
+
+ if (bkey_deleted(&prev.k->k)) {
+ bch2_topology_error(c);
+
+ printbuf_reset(&buf);
+ prt_str(&buf, "empty interior node\n");
+ prt_printf(&buf, " in btree %s level %u node ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+ need_fsck_err(c, btree_node_topology_empty_interior_node, "%s", buf.buf);
+ goto topology_repair;
+ } else if (!bpos_eq(prev.k->k.p, b->key.k.p)) {
+ bch2_topology_error(c);
+
+ printbuf_reset(&buf);
+ prt_str(&buf, "last child node doesn't end at end of parent node\n");
+ prt_printf(&buf, " in btree %s level %u node ",
+ bch2_btree_id_str(b->c.btree_id), b->c.level);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ prt_str(&buf, "\n last key ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
+
+ need_fsck_err(c, btree_node_topology_bad_max_key, "%s", buf.buf);
+ goto topology_repair;
+ }
+out:
+fsck_err:
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_bkey_buf_exit(&prev, c);
+ printbuf_exit(&buf);
+ return ret;
+topology_repair:
+ if ((c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
+ c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) {
+ bch2_inconsistent_error(c);
+ ret = -BCH_ERR_btree_need_topology_repair;
+ } else {
+ ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
}
-#endif
+ goto out;
}
/* Calculate ideal packed bkey format for new btree nodes: */
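
bch2_btree_node_check_topology() above enforces that child nodes exactly tile the parent's key range: the first child starts at the parent's min_key, each subsequent child starts at bpos_successor() of the previous child's max, and the last child ends at the parent's max_key. A simplified sketch of the invariant over integer keys (the real code compares struct bpos and iterates keys through the journal overlay):

/* Sketch of the topology invariant: child ranges must exactly tile the
 * parent's [min, max] with no gaps or overlaps. */
#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long long min, max; };

static bool ranges_tile(struct range parent, const struct range *child, int nr)
{
	unsigned long long expected_min = parent.min;

	for (int i = 0; i < nr; i++) {
		if (child[i].min != expected_min)
			return false;	/* gap or overlap before this child */
		expected_min = child[i].max + 1;	/* bpos_successor() */
	}

	/* last child must end exactly at the parent's max key */
	return nr && child[nr - 1].max == parent.max;
}

int main(void)
{
	struct range parent = { 0, 99 };
	struct range kids[] = { { 0, 49 }, { 50, 99 } };

	printf("%s\n", ranges_tile(parent, kids, 2) ? "ok" : "topology error");
	return 0;
}
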
@@ -101,7 +160,6 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
{
struct bkey_packed *k;
- struct bset_tree *t;
struct bkey uk;
for_each_bset(b, t)
@@ -254,7 +312,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
struct open_buckets obs = { .nr = 0 };
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
- unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim
+ unsigned nr_reserve = watermark < BCH_WATERMARK_reclaim
? BTREE_NODE_RESERVE
: 0;
int ret;
@@ -549,6 +607,26 @@ static void btree_update_add_key(struct btree_update *as,
bch2_keylist_push(keys);
}
+static bool btree_update_new_nodes_marked_sb(struct btree_update *as)
+{
+ for_each_keylist_key(&as->new_keys, k)
+ if (!bch2_dev_btree_bitmap_marked(as->c, bkey_i_to_s_c(k)))
+ return false;
+ return true;
+}
+
+static void btree_update_new_nodes_mark_sb(struct btree_update *as)
+{
+ struct bch_fs *c = as->c;
+
+ mutex_lock(&c->sb_lock);
+ for_each_keylist_key(&as->new_keys, k)
+ bch2_dev_btree_bitmap_mark(c, bkey_i_to_s_c(k));
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+}
+
/*
* The transactional part of an interior btree node update, where we journal the
* update we did to the interior node and update alloc info:
@@ -569,7 +647,7 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans,
unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;
ret = bch2_key_trigger_old(trans, as->btree_id, level, bkey_i_to_s_c(k),
- BTREE_TRIGGER_TRANSACTIONAL);
+ BTREE_TRIGGER_transactional);
if (ret)
return ret;
}
@@ -578,7 +656,7 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans,
unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;
ret = bch2_key_trigger_new(trans, as->btree_id, level, bkey_i_to_s(k),
- BTREE_TRIGGER_TRANSACTIONAL);
+ BTREE_TRIGGER_transactional);
if (ret)
return ret;
}
@@ -606,6 +684,9 @@ static void btree_update_nodes_written(struct btree_update *as)
if (ret)
goto err;
+ if (!btree_update_new_nodes_marked_sb(as))
+ btree_update_new_nodes_mark_sb(as);
+
/*
* Wait for any in flight writes to finish before we free the old nodes
* on disk:
@@ -638,7 +719,7 @@ static void btree_update_nodes_written(struct btree_update *as)
* which may require allocations as well.
*/
ret = commit_do(trans, &as->disk_res, &journal_seq,
- BCH_WATERMARK_reclaim|
+ BCH_WATERMARK_interior_updates|
BCH_TRANS_COMMIT_no_enospc|
BCH_TRANS_COMMIT_no_check_rw|
BCH_TRANS_COMMIT_journal_reclaim,
@@ -648,12 +729,13 @@ static void btree_update_nodes_written(struct btree_update *as)
bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
"%s", bch2_err_str(ret));
err:
- if (as->b) {
-
- b = as->b;
- btree_path_idx_t path_idx = get_unlocked_mut_path(trans,
- as->btree_id, b->c.level, b->key.k.p);
- struct btree_path *path = trans->paths + path_idx;
+ /*
+ * We have to be careful because another thread might be getting ready
+ * to free as->b and calling btree_update_reparent() on us - we'll
+ * recheck under btree_update_lock below:
+ */
+ b = READ_ONCE(as->b);
+ if (b) {
/*
* @b is the node we did the final insert into:
*
@@ -671,12 +753,16 @@ err:
* btree_node_lock_nopath() (the use of which is always suspect,
* we need to work on removing this in the future)
*
- * It should be, but get_unlocked_mut_path() -> bch2_path_get()
+ * It should be, but bch2_path_get_unlocked_mut() -> bch2_path_get()
* calls bch2_path_upgrade(), before we call path_make_mut(), so
* we may rarely end up with a locked path besides the one we
* have here:
*/
bch2_trans_unlock(trans);
+ bch2_trans_begin(trans);
+ btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
+ as->btree_id, b->c.level, b->key.k.p);
+ struct btree_path *path = trans->paths + path_idx;
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
@@ -794,15 +880,17 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b)
{
struct bch_fs *c = as->c;
- mutex_lock(&c->btree_interior_update_lock);
- list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
-
- BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
+ BUG_ON(as->mode != BTREE_UPDATE_none);
+ BUG_ON(as->update_level_end < b->c.level);
BUG_ON(!btree_node_dirty(b));
BUG_ON(!b->c.level);
- as->mode = BTREE_INTERIOR_UPDATING_NODE;
+ mutex_lock(&c->btree_interior_update_lock);
+ list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
+
+ as->mode = BTREE_UPDATE_node;
as->b = b;
+ as->update_level_end = b->c.level;
set_btree_node_write_blocked(b);
list_add(&as->write_blocked_list, &b->write_blocked);
@@ -824,7 +912,7 @@ static void btree_update_reparent(struct btree_update *as,
lockdep_assert_held(&c->btree_interior_update_lock);
child->b = NULL;
- child->mode = BTREE_INTERIOR_UPDATING_AS;
+ child->mode = BTREE_UPDATE_update;
bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal,
bch2_update_reparent_journal_pin_flush);
@@ -835,7 +923,7 @@ static void btree_update_updated_root(struct btree_update *as, struct btree *b)
struct bkey_i *insert = &b->key;
struct bch_fs *c = as->c;
- BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
+ BUG_ON(as->mode != BTREE_UPDATE_none);
BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
ARRAY_SIZE(as->journal_entries));
@@ -849,7 +937,7 @@ static void btree_update_updated_root(struct btree_update *as, struct btree *b)
mutex_lock(&c->btree_interior_update_lock);
list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
- as->mode = BTREE_INTERIOR_UPDATING_ROOT;
+ as->mode = BTREE_UPDATE_root;
mutex_unlock(&c->btree_interior_update_lock);
}
@@ -1027,7 +1115,7 @@ static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *
struct bch_fs *c = as->c;
u64 start_time = as->start_time;
- BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
+ BUG_ON(as->mode == BTREE_UPDATE_none);
if (as->took_gc_lock)
up_read(&as->c->gc_lock);
@@ -1044,7 +1132,7 @@ static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *
static struct btree_update *
bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
- unsigned level, bool split, unsigned flags)
+ unsigned level_start, bool split, unsigned flags)
{
struct bch_fs *c = trans->c;
struct btree_update *as;
@@ -1052,7 +1140,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
int disk_res_flags = (flags & BCH_TRANS_COMMIT_no_enospc)
? BCH_DISK_RESERVATION_NOFAIL : 0;
unsigned nr_nodes[2] = { 0, 0 };
- unsigned update_level = level;
+ unsigned level_end = level_start;
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
int ret = 0;
u32 restart_count = trans->restart_count;
@@ -1067,34 +1155,29 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
flags &= ~BCH_WATERMARK_MASK;
flags |= watermark;
- if (watermark < c->journal.watermark) {
- struct journal_res res = { 0 };
- unsigned journal_flags = watermark|JOURNAL_RES_GET_CHECK;
-
- if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
- watermark != BCH_WATERMARK_reclaim)
- journal_flags |= JOURNAL_RES_GET_NONBLOCK;
+ if (watermark < BCH_WATERMARK_reclaim &&
+ test_bit(JOURNAL_space_low, &c->journal.flags)) {
+ if (flags & BCH_TRANS_COMMIT_journal_reclaim)
+ return ERR_PTR(-BCH_ERR_journal_reclaim_would_deadlock);
ret = drop_locks_do(trans,
- bch2_journal_res_get(&c->journal, &res, 1, journal_flags));
- if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
- ret = -BCH_ERR_journal_reclaim_would_deadlock;
+ ({ wait_event(c->journal.wait, !test_bit(JOURNAL_space_low, &c->journal.flags)); 0; }));
if (ret)
return ERR_PTR(ret);
}
while (1) {
- nr_nodes[!!update_level] += 1 + split;
- update_level++;
+ nr_nodes[!!level_end] += 1 + split;
+ level_end++;
- ret = bch2_btree_path_upgrade(trans, path, update_level + 1);
+ ret = bch2_btree_path_upgrade(trans, path, level_end + 1);
if (ret)
return ERR_PTR(ret);
- if (!btree_path_node(path, update_level)) {
+ if (!btree_path_node(path, level_end)) {
/* Allocating new root? */
nr_nodes[1] += split;
- update_level = BTREE_MAX_DEPTH;
+ level_end = BTREE_MAX_DEPTH;
break;
}
@@ -1102,11 +1185,11 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
* Always check for space for two keys, even if we won't have to
* split at prior level - it might have been a merge instead:
*/
- if (bch2_btree_node_insert_fits(path->l[update_level].b,
+ if (bch2_btree_node_insert_fits(path->l[level_end].b,
BKEY_BTREE_PTR_U64s_MAX * 2))
break;
- split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
+ split = path->l[level_end].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
}
if (!down_read_trylock(&c->gc_lock)) {
@@ -1120,13 +1203,15 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
memset(as, 0, sizeof(*as));
closure_init(&as->cl, NULL);
- as->c = c;
- as->start_time = start_time;
- as->ip_started = _RET_IP_;
- as->mode = BTREE_INTERIOR_NO_UPDATE;
- as->took_gc_lock = true;
- as->btree_id = path->btree_id;
- as->update_level = update_level;
+ as->c = c;
+ as->start_time = start_time;
+ as->ip_started = _RET_IP_;
+ as->mode = BTREE_UPDATE_none;
+ as->flags = flags;
+ as->took_gc_lock = true;
+ as->btree_id = path->btree_id;
+ as->update_level_start = level_start;
+ as->update_level_end = level_end;
INIT_LIST_HEAD(&as->list);
INIT_LIST_HEAD(&as->unwritten_list);
INIT_LIST_HEAD(&as->write_blocked_list);
@@ -1168,7 +1253,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
*/
if (bch2_err_matches(ret, ENOSPC) &&
(flags & BCH_TRANS_COMMIT_journal_reclaim) &&
- watermark != BCH_WATERMARK_reclaim) {
+ watermark < BCH_WATERMARK_reclaim) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
goto err;
}
@@ -1220,23 +1305,29 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
bch2_recalc_btree_reserve(c);
}
-static void bch2_btree_set_root(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b)
+static int bch2_btree_set_root(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ bool nofail)
{
struct bch_fs *c = as->c;
- struct btree *old;
trace_and_count(c, btree_node_set_root, trans, b);
- old = btree_node_root(c, b);
+ struct btree *old = btree_node_root(c, b);
/*
* Ensure no one is using the old root while we switch to the
* new root:
*/
- bch2_btree_node_lock_write_nofail(trans, path, &old->c);
+ if (nofail) {
+ bch2_btree_node_lock_write_nofail(trans, path, &old->c);
+ } else {
+ int ret = bch2_btree_node_lock_write(trans, path, &old->c);
+ if (ret)
+ return ret;
+ }
bch2_btree_set_root_inmem(c, b);
@@ -1250,6 +1341,7 @@ static void bch2_btree_set_root(struct btree_update *as,
* depend on the new root would have to update the new root.
*/
bch2_btree_node_unlock_write(trans, path, old);
+ return 0;
}
/* Interior node updates: */
@@ -1269,7 +1361,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
!btree_ptr_sectors_written(insert));
- if (unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)))
+ if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)))
bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);
if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
@@ -1316,12 +1408,12 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
}
static void
-__bch2_btree_insert_keys_interior(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct btree_node_iter node_iter,
- struct keylist *keys)
+bch2_btree_insert_keys_interior(struct btree_update *as,
+ struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree *b,
+ struct btree_node_iter node_iter,
+ struct keylist *keys)
{
struct bkey_i *insert = bch2_keylist_front(keys);
struct bkey_packed *k;
@@ -1380,9 +1472,16 @@ static void __btree_split_node(struct btree_update *as,
if (bkey_deleted(k))
continue;
+ uk = bkey_unpack_key(b, k);
+
+ if (b->c.level &&
+ u64s < n1_u64s &&
+ u64s + k->u64s >= n1_u64s &&
+ bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p))
+ n1_u64s += k->u64s;
+
i = u64s >= n1_u64s;
u64s += k->u64s;
- uk = bkey_unpack_key(b, k);
if (!i)
n1_pos = uk.p;
bch2_bkey_format_add_key(&format[i], &uk);
@@ -1441,8 +1540,7 @@ static void __btree_split_node(struct btree_update *as,
bch2_verify_btree_nr_keys(n[i]);
- if (b->c.level)
- btree_node_interior_verify(as->c, n[i]);
+ BUG_ON(bch2_btree_node_check_topology(trans, n[i]));
}
}
@@ -1471,9 +1569,9 @@ static void btree_split_insert_keys(struct btree_update *as,
bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
- __bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
+ bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);
- btree_node_interior_verify(as->c, b);
+ BUG_ON(bch2_btree_node_check_topology(trans, b));
}
}
@@ -1488,9 +1586,14 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
u64 start_time = local_clock();
int ret = 0;
+ bch2_verify_btree_nr_keys(b);
BUG_ON(!parent && (b != btree_node_root(c, b)));
BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1));
+ ret = bch2_btree_node_check_topology(trans, b);
+ if (ret)
+ return ret;
+
bch2_btree_interior_update_will_free_node(as, b);
if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
@@ -1517,12 +1620,12 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
six_unlock_write(&n2->c.lock);
six_unlock_write(&n1->c.lock);
- path1 = get_unlocked_mut_path(trans, as->btree_id, n1->c.level, n1->key.k.p);
+ path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p);
six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + path1, n1);
- path2 = get_unlocked_mut_path(trans, as->btree_id, n2->c.level, n2->key.k.p);
+ path2 = bch2_path_get_unlocked_mut(trans, as->btree_id, n2->c.level, n2->key.k.p);
six_lock_increment(&n2->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + path2, n2->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + path2, n2);
@@ -1567,7 +1670,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
bch2_btree_update_add_new_node(as, n1);
six_unlock_write(&n1->c.lock);
- path1 = get_unlocked_mut_path(trans, as->btree_id, n1->c.level, n1->key.k.p);
+ path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p);
six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + path1, n1);
@@ -1581,15 +1684,16 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
if (parent) {
/* Split a non root node */
ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
- if (ret)
- goto err;
} else if (n3) {
- bch2_btree_set_root(as, trans, trans->paths + path, n3);
+ ret = bch2_btree_set_root(as, trans, trans->paths + path, n3, false);
} else {
/* Root filled up but didn't need to be split */
- bch2_btree_set_root(as, trans, trans->paths + path, n1);
+ ret = bch2_btree_set_root(as, trans, trans->paths + path, n1, false);
}
+ if (ret)
+ goto err;
+
if (n3) {
bch2_btree_update_get_open_buckets(as, n3);
bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
@@ -1646,27 +1750,6 @@ err:
goto out;
}
-static void
-bch2_btree_insert_keys_interior(struct btree_update *as,
- struct btree_trans *trans,
- struct btree_path *path,
- struct btree *b,
- struct keylist *keys)
-{
- struct btree_path *linked;
- unsigned i;
-
- __bch2_btree_insert_keys_interior(as, trans, path, b,
- path->l[b->c.level].iter, keys);
-
- btree_update_updated_node(as, b);
-
- trans_for_each_path_with_node(trans, b, linked, i)
- bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
-
- bch2_trans_verify_paths(trans);
-}
-
/**
* bch2_btree_insert_node - insert bkeys into a given btree node
*
@@ -1687,7 +1770,8 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
struct keylist *keys)
{
struct bch_fs *c = as->c;
- struct btree_path *path = trans->paths + path_idx;
+ struct btree_path *path = trans->paths + path_idx, *linked;
+ unsigned i;
int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
@@ -1710,9 +1794,19 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
goto split;
}
- btree_node_interior_verify(c, b);
+ ret = bch2_btree_node_check_topology(trans, b);
+ if (ret) {
+ bch2_btree_node_unlock_write(trans, path, b);
+ return ret;
+ }
+
+ bch2_btree_insert_keys_interior(as, trans, path, b,
+ path->l[b->c.level].iter, keys);
- bch2_btree_insert_keys_interior(as, trans, path, b, keys);
+ trans_for_each_path_with_node(trans, b, linked, i)
+ bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
+
+ bch2_trans_verify_paths(trans);
live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
@@ -1726,16 +1820,17 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
bch2_maybe_compact_whiteouts(c, b))
bch2_trans_node_reinit_iter(trans, b);
+ btree_update_updated_node(as, b);
bch2_btree_node_unlock_write(trans, path, b);
- btree_node_interior_verify(c, b);
+ BUG_ON(bch2_btree_node_check_topology(trans, b));
return 0;
split:
/*
* We could attempt to avoid the transaction restart, by calling
* bch2_btree_path_upgrade() and allocating more nodes:
*/
- if (b->c.level >= as->update_level) {
+ if (b->c.level >= as->update_level_end) {
trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
}
@@ -1801,7 +1896,9 @@ static void __btree_increase_depth(struct btree_update *as, struct btree_trans *
bch2_keylist_add(&as->parent_keys, &b->key);
btree_split_insert_keys(as, trans, path_idx, n, &as->parent_keys);
- bch2_btree_set_root(as, trans, path, n);
+ int ret = bch2_btree_set_root(as, trans, path, n, true);
+ BUG_ON(ret);
+
bch2_btree_update_get_open_buckets(as, n);
bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
bch2_trans_node_add(trans, path, n);
@@ -1818,9 +1915,12 @@ int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path,
{
struct bch_fs *c = trans->c;
struct btree *b = bch2_btree_id_root(c, trans->paths[path].btree_id)->b;
+
+ if (btree_node_fake(b))
+ return bch2_btree_split_leaf(trans, path, flags);
+
struct btree_update *as =
- bch2_btree_update_start(trans, trans->paths + path,
- b->c.level, true, flags);
+ bch2_btree_update_start(trans, trans->paths + path, b->c.level, true, flags);
if (IS_ERR(as))
return PTR_ERR(as);
@@ -1848,9 +1948,27 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
u64 start_time = local_clock();
int ret = 0;
+ bch2_trans_verify_not_in_restart(trans);
+ bch2_trans_verify_not_unlocked(trans);
BUG_ON(!trans->paths[path].should_be_locked);
BUG_ON(!btree_node_locked(&trans->paths[path], level));
+ /*
+	 * Work around a deadlock caused by the btree write buffer not doing
+	 * merges, which leaves many merges for us to do. We really don't need
+	 * to be doing merges at all from the interior update path, and if the
+	 * interior update path generates too many new interior updates, we
+	 * deadlock:
+ */
+ if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates)
+ return 0;
+
+ if ((flags & BCH_WATERMARK_MASK) <= BCH_WATERMARK_reclaim) {
+ flags &= ~BCH_WATERMARK_MASK;
+ flags |= BCH_WATERMARK_btree;
+ flags |= BCH_TRANS_COMMIT_journal_reclaim;
+ }
+
b = trans->paths[path].l[level].b;
if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
@@ -1864,7 +1982,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
: bpos_successor(b->data->max_key);
sib_path = bch2_path_get(trans, btree, sib_pos,
- U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
+ U8_MAX, level, BTREE_ITER_intent, _THIS_IP_);
ret = bch2_btree_path_traverse(trans, sib_path, false);
if (ret)
goto err;
@@ -1957,7 +2075,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
bch2_btree_update_add_new_node(as, n);
six_unlock_write(&n->c.lock);
- new_path = get_unlocked_mut_path(trans, btree, n->c.level, n->key.k.p);
+ new_path = bch2_path_get_unlocked_mut(trans, btree, n->c.level, n->key.k.p);
six_lock_increment(&n->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + new_path, n);
@@ -1996,6 +2114,10 @@ err:
bch2_path_put(trans, new_path, true);
bch2_path_put(trans, sib_path, true);
bch2_trans_verify_locks(trans);
+ if (ret == -BCH_ERR_journal_reclaim_would_deadlock)
+ ret = 0;
+ if (!ret)
+ ret = bch2_trans_relock(trans);
return ret;
err_free_update:
bch2_btree_node_free_never_used(as, trans, n);
@@ -2031,7 +2153,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
bch2_btree_update_add_new_node(as, n);
six_unlock_write(&n->c.lock);
- new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p);
+ new_path = bch2_path_get_unlocked_mut(trans, iter->btree_id, n->c.level, n->key.k.p);
six_lock_increment(&n->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + new_path, n);
@@ -2041,12 +2163,13 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
if (parent) {
bch2_keylist_add(&as->parent_keys, &n->key);
ret = bch2_btree_insert_node(as, trans, iter->path, parent, &as->parent_keys);
- if (ret)
- goto err;
} else {
- bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n);
+ ret = bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n, false);
}
+ if (ret)
+ goto err;
+
bch2_btree_update_get_open_buckets(as, n);
bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
@@ -2213,10 +2336,10 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
if (!skip_triggers) {
ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1,
bkey_i_to_s_c(&b->key),
- BTREE_TRIGGER_TRANSACTIONAL) ?:
+ BTREE_TRIGGER_transactional) ?:
bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1,
bkey_i_to_s(new_key),
- BTREE_TRIGGER_TRANSACTIONAL);
+ BTREE_TRIGGER_transactional);
if (ret)
return ret;
}
@@ -2233,7 +2356,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
bch2_trans_copy_iter(&iter2, iter);
iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
- iter2.flags & BTREE_ITER_INTENT,
+ iter2.flags & BTREE_ITER_intent,
_THIS_IP_);
struct btree_path *path2 = btree_iter_path(trans, &iter2);
@@ -2245,7 +2368,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
trans->paths_sorted = false;
ret = bch2_btree_iter_traverse(&iter2) ?:
- bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN);
+ bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
if (ret)
goto err;
} else {
@@ -2353,7 +2476,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p,
BTREE_MAX_DEPTH, b->c.level,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
ret = bch2_btree_iter_traverse(&iter);
if (ret)
goto out;
@@ -2367,7 +2490,6 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
BUG_ON(!btree_node_hashed(b));
- struct bch_extent_ptr *ptr;
bch2_bkey_drop_ptrs(bkey_i_to_s(new_key), ptr,
!bch2_bkey_has_device(bkey_i_to_s(&b->key), ptr->dev));
@@ -2391,7 +2513,7 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
bch2_btree_set_root_inmem(c, b);
}
-static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
+int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id, unsigned level)
{
struct bch_fs *c = trans->c;
struct closure cl;
@@ -2410,7 +2532,7 @@ static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
set_btree_node_fake(b);
set_btree_node_need_rewrite(b);
- b->c.level = 0;
+ b->c.level = level;
b->c.btree_id = id;
bkey_btree_ptr_init(&b->key);
@@ -2437,9 +2559,24 @@ static int __bch2_btree_root_alloc(struct btree_trans *trans, enum btree_id id)
return 0;
}
-void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
+void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
{
- bch2_trans_run(c, __bch2_btree_root_alloc(trans, id));
+ bch2_trans_run(c, bch2_btree_root_alloc_fake_trans(trans, id, level));
+}
+
+static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
+{
+ prt_printf(out, "%ps: ", (void *) as->ip_started);
+ bch2_trans_commit_flags_to_text(out, as->flags);
+
+ prt_printf(out, " btree=%s l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
+ bch2_btree_id_str(as->btree_id),
+ as->update_level_start,
+ as->update_level_end,
+ bch2_btree_update_modes[as->mode],
+ as->nodes_written,
+ closure_nr_remaining(&as->cl),
+ as->journal.seq);
}
void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
@@ -2448,12 +2585,7 @@ void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
mutex_lock(&c->btree_interior_update_lock);
list_for_each_entry(as, &c->btree_interior_update_list, list)
- prt_printf(out, "%ps: mode=%u nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
- (void *) as->ip_started,
- as->mode,
- as->nodes_written,
- closure_nr_remaining(&as->cl),
- as->journal.seq);
+ bch2_btree_update_to_text(out, as);
mutex_unlock(&c->btree_interior_update_lock);
}
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index f651dd48aaa0..b5b76ce01cfc 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -10,6 +10,20 @@
#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
+int bch2_btree_node_check_topology(struct btree_trans *, struct btree *);
+
+#define BTREE_UPDATE_MODES() \
+ x(none) \
+ x(node) \
+ x(root) \
+ x(update)
+
+enum btree_update_mode {
+#define x(n) BTREE_UPDATE_##n,
+ BTREE_UPDATE_MODES()
+#undef x
+};
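Aside: BTREE_UPDATE_MODES() above is an x-macro list - expanded once with x() as an enum entry it yields the enum, and the same list can be expanded again to build a matching name table (presumably how the bch2_btree_update_modes[] strings printed by bch2_btree_update_to_text() stay in sync). A minimal standalone sketch of the idiom, with illustrative names:

#include <stdio.h>

#define BTREE_UPDATE_MODES()	\
	x(none)			\
	x(node)			\
	x(root)		\
	x(update)

enum btree_update_mode {
#define x(n)	BTREE_UPDATE_##n,
	BTREE_UPDATE_MODES()
#undef x
};

/* Same list, expanded a second time as strings: */
static const char * const btree_update_mode_strs[] = {
#define x(n)	#n,
	BTREE_UPDATE_MODES()
#undef x
};

int main(void)
{
	/* Enum values and names agree by construction: */
	printf("%s\n", btree_update_mode_strs[BTREE_UPDATE_root]);	/* "root" */
	return 0;
}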
+
/*
* Tracks an in progress split/rewrite of a btree node and the update to the
* parent node:
@@ -37,24 +51,19 @@ struct btree_update {
struct list_head list;
struct list_head unwritten_list;
- /* What kind of update are we doing? */
- enum {
- BTREE_INTERIOR_NO_UPDATE,
- BTREE_INTERIOR_UPDATING_NODE,
- BTREE_INTERIOR_UPDATING_ROOT,
- BTREE_INTERIOR_UPDATING_AS,
- } mode;
-
+ enum btree_update_mode mode;
+ enum bch_trans_commit_flags flags;
unsigned nodes_written:1;
unsigned took_gc_lock:1;
enum btree_id btree_id;
- unsigned update_level;
+ unsigned update_level_start;
+ unsigned update_level_end;
struct disk_reservation disk_res;
/*
- * BTREE_INTERIOR_UPDATING_NODE:
+ * BTREE_UPDATE_node:
* The update that made the new nodes visible was a regular update to an
* existing interior node - @b. We can't write out the update to @b
* until the new nodes we created are finished writing, so we block @b
@@ -135,6 +144,9 @@ static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
EBUG_ON(!btree_node_locked(path, level));
+ if (bch2_btree_node_merging_disabled)
+ return 0;
+
b = path->l[level].b;
if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
return 0;
@@ -163,7 +175,9 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
struct bkey_i *, unsigned, bool);
void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
-void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
+
+int bch2_btree_root_alloc_fake_trans(struct btree_trans *, enum btree_id, unsigned);
+void bch2_btree_root_alloc_fake(struct bch_fs *, enum btree_id, unsigned);
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
struct btree *b)
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 5cbad8445782..75c8a196b3f6 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -11,6 +11,7 @@
#include "journal_reclaim.h"
#include <linux/prefetch.h>
+#include <linux/sort.h>
static int bch2_btree_write_buffer_journal_flush(struct journal *,
struct journal_entry_pin *, u64);
@@ -46,6 +47,14 @@ static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_ke
#endif
}
+static int wb_key_seq_cmp(const void *_l, const void *_r)
+{
+ const struct btree_write_buffered_key *l = _l;
+ const struct btree_write_buffered_key *r = _r;
+
+ return cmp_int(l->journal_seq, r->journal_seq);
+}
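Aside: wb_key_seq_cmp() is a standard three-way comparator for sort(); the flush-slowpath hunk further down uses it so buffered keys are replayed in increasing journal-sequence order. A userspace sketch of the same pattern, with qsort() standing in for the kernel's sort() and a simplified cmp_int() (names illustrative):

#include <stdio.h>
#include <stdlib.h>

#define cmp_int(l, r)	(((l) > (r)) - ((l) < (r)))	/* three-way compare */

struct wb_key { unsigned long long journal_seq; };

static int wb_key_seq_cmp(const void *_l, const void *_r)
{
	const struct wb_key *l = _l;
	const struct wb_key *r = _r;

	return cmp_int(l->journal_seq, r->journal_seq);
}

int main(void)
{
	struct wb_key keys[] = { { 7 }, { 3 }, { 5 } };

	qsort(keys, 3, sizeof(keys[0]), wb_key_seq_cmp);
	for (int i = 0; i < 3; i++)
		printf("%llu\n", keys[i].journal_seq);	/* 3 5 7 */
	return 0;
}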
+
/* Compare excluding idx, the low 24 bits: */
static inline bool wb_key_eq(const void *_l, const void *_r)
{
@@ -113,7 +122,7 @@ static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
trans->journal_res.seq = wb->journal_seq;
return bch2_trans_update(trans, iter, &wb->k,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc|
BCH_TRANS_COMMIT_no_check_rw|
@@ -182,13 +191,13 @@ btree_write_buffered_insert(struct btree_trans *trans,
int ret;
bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
- BTREE_ITER_CACHED|BTREE_ITER_INTENT);
+ BTREE_ITER_cached|BTREE_ITER_intent);
trans->journal_res.seq = wb->journal_seq;
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, &wb->k,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -307,13 +316,23 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
bch2_btree_node_unlock_write(trans, path, path->l[0].b);
write_locked = false;
+
+ ret = lockrestart_do(trans,
+ bch2_btree_iter_traverse(&iter) ?:
+ bch2_foreground_maybe_merge(trans, iter.path, 0,
+ BCH_WATERMARK_reclaim|
+ BCH_TRANS_COMMIT_journal_reclaim|
+ BCH_TRANS_COMMIT_no_check_rw|
+ BCH_TRANS_COMMIT_no_enospc));
+ if (ret)
+ goto err;
}
}
if (!iter.path || iter.btree_id != k->btree) {
bch2_trans_iter_exit(trans, &iter);
bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
- BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_intent|BTREE_ITER_all_snapshots);
}
bch2_btree_iter_set_pos(&iter, k->k.k.p);
@@ -357,6 +376,11 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
*/
trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
+ sort(wb->flushing.keys.data,
+ wb->flushing.keys.nr,
+ sizeof(wb->flushing.keys.data[0]),
+ wb_key_seq_cmp, NULL);
+
darray_for_each(wb->flushing.keys, i) {
if (!i->journal_seq)
continue;
@@ -368,10 +392,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
ret = commit_do(trans, NULL, NULL,
BCH_WATERMARK_reclaim|
+ BCH_TRANS_COMMIT_journal_reclaim|
BCH_TRANS_COMMIT_no_check_rw|
BCH_TRANS_COMMIT_no_enospc|
- BCH_TRANS_COMMIT_no_journal_res|
- BCH_TRANS_COMMIT_journal_reclaim,
+ BCH_TRANS_COMMIT_no_journal_res,
btree_write_buffered_insert(trans, i));
if (ret)
goto err;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 96edf2c34d43..e28d28ac2a13 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -274,25 +274,14 @@ void bch2_dev_usage_init(struct bch_dev *ca)
void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
{
- prt_tab(out);
- prt_str(out, "buckets");
- prt_tab_rjust(out);
- prt_str(out, "sectors");
- prt_tab_rjust(out);
- prt_str(out, "fragmented");
- prt_tab_rjust(out);
- prt_newline(out);
+ prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");
for (unsigned i = 0; i < BCH_DATA_NR; i++) {
bch2_prt_data_type(out, i);
- prt_tab(out);
- prt_u64(out, usage->d[i].buckets);
- prt_tab_rjust(out);
- prt_u64(out, usage->d[i].sectors);
- prt_tab_rjust(out);
- prt_u64(out, usage->d[i].fragmented);
- prt_tab_rjust(out);
- prt_newline(out);
+ prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
+ usage->d[i].buckets,
+ usage->d[i].sectors,
+ usage->d[i].fragmented);
}
}
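Aside: the rewrite above relies on bcachefs printbufs treating '\t' as "advance to the next tabstop" and '\r' as "right-justify the preceding field" - the control characters encode what the removed prt_tab()/prt_tab_rjust() calls spelled out. A rough userspace analogue of the table it emits, using printf field widths in place of tabstops (layout illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long long buckets = 128, sectors = 65536, fragmented = 12;

	printf("%-10s %10s %10s %12s\n", "", "buckets", "sectors", "fragmented");
	printf("%-10s %10llu %10llu %12llu\n", "user", buckets, sectors, fragmented);
	return 0;
}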
@@ -329,26 +318,6 @@ void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
preempt_enable();
}
-static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b)
-{
- return (struct bch_alloc_v4) {
- .gen = b.gen,
- .data_type = b.data_type,
- .dirty_sectors = b.dirty_sectors,
- .cached_sectors = b.cached_sectors,
- .stripe = b.stripe,
- };
-}
-
-void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *old, struct bucket *new)
-{
- struct bch_alloc_v4 old_a = bucket_m_to_alloc(*old);
- struct bch_alloc_v4 new_a = bucket_m_to_alloc(*new);
-
- bch2_dev_usage_update(c, ca, &old_a, &new_a, 0, true);
-}
-
static inline int __update_replicas(struct bch_fs *c,
struct bch_fs_usage *fs_usage,
struct bch_replicas_entry_v1 *r,
@@ -496,78 +465,276 @@ int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64
return bch2_update_replicas_list(trans, &r.e, sectors);
}
-int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, enum bch_data_type data_type,
- unsigned sectors, struct gc_pos pos,
- unsigned flags)
+int bch2_check_fix_ptrs(struct btree_trans *trans,
+ enum btree_id btree, unsigned level, struct bkey_s_c k,
+ enum btree_iter_update_trigger_flags flags)
{
- struct bucket old, new, *g;
+ struct bch_fs *c = trans->c;
+ struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
+ const union bch_extent_entry *entry_c;
+ struct extent_ptr_decoded p = { 0 };
+ bool do_update = false;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
- BUG_ON(!(flags & BTREE_TRIGGER_GC));
- BUG_ON(data_type != BCH_DATA_sb &&
- data_type != BCH_DATA_journal);
+ percpu_down_read(&c->mark_lock);
- /*
- * Backup superblock might be past the end of our normal usable space:
- */
- if (b >= ca->mi.nbuckets)
- return 0;
+ rcu_read_lock();
+ bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
+ struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+ if (!ca) {
+ if (fsck_err(c, ptr_to_invalid_device,
+ "pointer to missing device %u\n"
+ "while marking %s",
+ p.ptr.dev,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ do_update = true;
+ continue;
+ }
- percpu_down_read(&c->mark_lock);
- g = gc_bucket(ca, b);
+ struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry_c);
+
+ if (fsck_err_on(!g->gen_valid,
+ c, ptr_to_missing_alloc_key,
+ "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (!p.ptr.cached) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ } else {
+ do_update = true;
+ }
+ }
- bucket_lock(g);
- old = *g;
+ if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
+ c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (!p.ptr.cached &&
+ (g->data_type != BCH_DATA_btree ||
+ data_type == BCH_DATA_btree)) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ g->data_type = 0;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ } else {
+ do_update = true;
+ }
+ }
- if (bch2_fs_inconsistent_on(g->data_type &&
- g->data_type != data_type, c,
- "different types of data in same bucket: %s, %s",
- bch2_data_type_str(g->data_type),
- bch2_data_type_str(data_type))) {
- ret = -EIO;
- goto err;
- }
+ if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
+ c, ptr_gen_newer_than_bucket_gen,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ do_update = true;
+
+ if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
+ c, stale_dirty_ptr,
+ "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
+ bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
+ p.ptr.gen, g->gen,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ do_update = true;
+
+ if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
+ continue;
+
+ if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
+ c, ptr_bucket_data_type_mismatch,
+ "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
+ bch2_data_type_str(g->data_type),
+ bch2_data_type_str(data_type),
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ if (data_type == BCH_DATA_btree) {
+ g->gen_valid = true;
+ g->gen = p.ptr.gen;
+ g->data_type = data_type;
+ g->dirty_sectors = 0;
+ g->cached_sectors = 0;
+ } else {
+ do_update = true;
+ }
+ }
- if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
- "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
- ca->dev_idx, b, g->gen,
- bch2_data_type_str(g->data_type ?: data_type),
- g->dirty_sectors, sectors)) {
- ret = -EIO;
- goto err;
+ if (p.has_ec) {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
+
+ if (fsck_err_on(!m || !m->alive, c,
+ ptr_to_missing_stripe,
+ "pointer to nonexistent stripe %llu\n"
+ "while marking %s",
+ (u64) p.ec.idx,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ do_update = true;
+
+ if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
+ ptr_to_incorrect_stripe,
+ "pointer does not match stripe %llu\n"
+ "while marking %s",
+ (u64) p.ec.idx,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+ do_update = true;
+ }
}
+ rcu_read_unlock();
- g->data_type = data_type;
- g->dirty_sectors += sectors;
- new = *g;
+ if (do_update) {
+ if (flags & BTREE_TRIGGER_is_root) {
+ bch_err(c, "cannot update btree roots yet");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
+ ret = PTR_ERR_OR_ZERO(new);
+ if (ret)
+ goto err;
+
+ rcu_read_lock();
+ bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_rcu(c, ptr->dev));
+ rcu_read_unlock();
+
+ if (level) {
+ /*
+ * We don't want to drop btree node pointers - if the
+ * btree node isn't there anymore, the read path will
+ * sort it out:
+ */
+ struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ rcu_read_lock();
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ struct bucket *g = PTR_GC_BUCKET(ca, ptr);
+
+ ptr->gen = g->gen;
+ }
+ rcu_read_unlock();
+ } else {
+ struct bkey_ptrs ptrs;
+ union bch_extent_entry *entry;
+restart_drop_ptrs:
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ rcu_read_lock();
+ bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
+ struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+ struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+ enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);
+
+ if ((p.ptr.cached &&
+ (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
+ (!p.ptr.cached &&
+ gen_cmp(p.ptr.gen, g->gen) < 0) ||
+ gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
+ (g->data_type &&
+ g->data_type != data_type)) {
+ bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
+ goto restart_drop_ptrs;
+ }
+ }
+ rcu_read_unlock();
+again:
+ ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
+ bkey_extent_entry_for_each(ptrs, entry) {
+ if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
+ struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
+ entry->stripe_ptr.idx);
+ union bch_extent_entry *next_ptr;
+
+ bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
+ if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
+ goto found;
+ next_ptr = NULL;
+found:
+ if (!next_ptr) {
+ bch_err(c, "aieee, found stripe ptr with no data ptr");
+ continue;
+ }
+
+ if (!m || !m->alive ||
+ !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
+ &next_ptr->ptr,
+ m->sectors)) {
+ bch2_bkey_extent_entry_drop(new, entry);
+ goto again;
+ }
+ }
+ }
+ }
+
+ if (0) {
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch_info(c, "updated %s", buf.buf);
+
+ printbuf_reset(&buf);
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
+ bch_info(c, "new key %s", buf.buf);
+ }
+
+ percpu_up_read(&c->mark_lock);
+ struct btree_iter iter;
+ bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
+ BTREE_ITER_intent|BTREE_ITER_all_snapshots);
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_trans_update(trans, &iter, new,
+ BTREE_UPDATE_internal_snapshot_node|
+ BTREE_TRIGGER_norun);
+ bch2_trans_iter_exit(trans, &iter);
+ percpu_down_read(&c->mark_lock);
+
+ if (ret)
+ goto err;
+
+ if (level)
+ bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
+ }
err:
- bucket_unlock(g);
- if (!ret)
- bch2_dev_usage_update_m(c, ca, &old, &new);
+fsck_err:
percpu_up_read(&c->mark_lock);
+ printbuf_exit(&buf);
return ret;
}
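Aside: bch2_check_fix_ptrs() repairs keys with the drop-and-restart idiom visible at the restart_drop_ptrs/again labels above - removing an extent entry shifts everything after it and invalidates the in-flight iterator, so the scan restarts from the top after each drop. The same pattern on a plain array (illustrative):

#include <stdio.h>

static int bad(int v) { return v < 0; }

static void drop_bad_entries(int *a, int *nr)
{
again:
	for (int i = 0; i < *nr; i++)
		if (bad(a[i])) {
			/* dropping shifts the tail down, so restart the scan */
			for (int j = i; j + 1 < *nr; j++)
				a[j] = a[j + 1];
			(*nr)--;
			goto again;
		}
}

int main(void)
{
	int a[] = { 1, -2, 3, -4, 5 }, nr = 5;

	drop_bad_entries(a, &nr);
	for (int i = 0; i < nr; i++)
		printf("%d ", a[i]);	/* 1 3 5 */
	printf("\n");
	return 0;
}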
-int bch2_check_bucket_ref(struct btree_trans *trans,
- struct bkey_s_c k,
- const struct bch_extent_ptr *ptr,
- s64 sectors, enum bch_data_type ptr_data_type,
- u8 b_gen, u8 bucket_data_type,
- u32 bucket_sectors)
+int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
+ struct bkey_s_c k,
+ const struct bch_extent_ptr *ptr,
+ s64 sectors, enum bch_data_type ptr_data_type,
+ u8 b_gen, u8 bucket_data_type,
+ u32 *bucket_sectors)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
struct printbuf buf = PRINTBUF;
+ bool inserting = sectors > 0;
int ret = 0;
- if (bucket_data_type == BCH_DATA_cached)
- bucket_data_type = BCH_DATA_user;
-
- if ((bucket_data_type == BCH_DATA_stripe && ptr_data_type == BCH_DATA_user) ||
- (bucket_data_type == BCH_DATA_user && ptr_data_type == BCH_DATA_stripe))
- bucket_data_type = ptr_data_type = BCH_DATA_stripe;
+ BUG_ON(!sectors);
if (gen_after(ptr->gen, b_gen)) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
@@ -578,8 +745,9 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
bch2_data_type_str(bucket_data_type ?: ptr_data_type),
ptr->gen,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- ret = -EIO;
- goto err;
+ if (inserting)
+ goto err;
+ goto out;
}
if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
@@ -592,11 +760,17 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
ptr->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- ret = -EIO;
- goto err;
+ if (inserting)
+ goto err;
+ goto out;
+ }
+
+ if (b_gen != ptr->gen && ptr->cached) {
+ ret = 1;
+ goto out;
}
- if (b_gen != ptr->gen && !ptr->cached) {
+ if (b_gen != ptr->gen) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
BCH_FSCK_ERR_stale_dirty_ptr,
"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
@@ -607,18 +781,12 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
ptr->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- ret = -EIO;
- goto err;
- }
-
- if (b_gen != ptr->gen) {
- ret = 1;
+ if (inserting)
+ goto err;
goto out;
}
- if (!data_type_is_empty(bucket_data_type) &&
- ptr_data_type &&
- bucket_data_type != ptr_data_type) {
+ if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
BCH_FSCK_ERR_ptr_bucket_data_type_mismatch,
"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
@@ -628,28 +796,33 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
bch2_data_type_str(ptr_data_type),
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- ret = -EIO;
- goto err;
+ if (inserting)
+ goto err;
+ goto out;
}
- if ((u64) bucket_sectors + sectors > U32_MAX) {
+ if ((u64) *bucket_sectors + sectors > U32_MAX) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
BCH_FSCK_ERR_bucket_sector_count_overflow,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
"while marking %s",
ptr->dev, bucket_nr, b_gen,
bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- bucket_sectors, sectors,
+ *bucket_sectors, sectors,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- ret = -EIO;
- goto err;
+ if (inserting)
+ goto err;
+ sectors = -*bucket_sectors;
}
+
+ *bucket_sectors += sectors;
out:
printbuf_exit(&buf);
return ret;
err:
bch2_dump_trans_updates(trans);
+ ret = -EIO;
goto out;
}
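Aside: note the policy running through bch2_bucket_ref_update() - inconsistencies are hard errors only when adding a reference (inserting, i.e. sectors > 0); when a reference is being released the problem is reported but the update proceeds, so deletions can always make progress. A compressed sketch of that shape (names and error handling illustrative):

#include <errno.h>
#include <stdio.h>

/* Returns -EIO only when the caller is adding a reference: */
static int ref_update(long long sectors, int inconsistent)
{
	int inserting = sectors > 0;

	if (inconsistent) {
		fprintf(stderr, "bucket inconsistency\n");
		return inserting ? -EIO : 0;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", ref_update(+8, 1));	/* -5: insert fails */
	printf("%d\n", ref_update(-8, 1));	/* 0: delete proceeds */
	return 0;
}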
@@ -786,111 +959,92 @@ need_mark:
/* KEY_TYPE_extent: */
-static int __mark_pointer(struct btree_trans *trans,
+static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
s64 sectors, enum bch_data_type ptr_data_type,
- u8 bucket_gen, u8 *bucket_data_type,
- u32 *dirty_sectors, u32 *cached_sectors)
+ struct bch_alloc_v4 *a)
{
u32 *dst_sectors = !ptr->cached
- ? dirty_sectors
- : cached_sectors;
- int ret = bch2_check_bucket_ref(trans, k, ptr, sectors, ptr_data_type,
- bucket_gen, *bucket_data_type, *dst_sectors);
+ ? &a->dirty_sectors
+ : &a->cached_sectors;
+ int ret = bch2_bucket_ref_update(trans, ca, k, ptr, sectors, ptr_data_type,
+ a->gen, a->data_type, dst_sectors);
if (ret)
return ret;
- *dst_sectors += sectors;
-
- if (!*dirty_sectors && !*cached_sectors)
- *bucket_data_type = 0;
- else if (*bucket_data_type != BCH_DATA_stripe)
- *bucket_data_type = ptr_data_type;
-
+ alloc_data_type_set(a, ptr_data_type);
return 0;
}
static int bch2_trigger_pointer(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
+ const union bch_extent_entry *entry,
s64 *sectors,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
- bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
+ bool insert = !(flags & BTREE_TRIGGER_overwrite);
+ int ret = 0;
+
+ struct bch_fs *c = trans->c;
+ struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
+ if (unlikely(!ca)) {
+ if (insert)
+ ret = -EIO;
+ goto err;
+ }
+
struct bpos bucket;
struct bch_backpointer bp;
-
- bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, &bucket, &bp);
+ bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp);
*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
- struct btree_iter iter;
- struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, &iter, bucket);
- int ret = PTR_ERR_OR_ZERO(a);
+ if (flags & BTREE_TRIGGER_transactional) {
+ struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket);
+ ret = PTR_ERR_OR_ZERO(a) ?:
+ __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &a->v);
if (ret)
- return ret;
-
- ret = __mark_pointer(trans, k, &p.ptr, *sectors, bp.data_type,
- a->v.gen, &a->v.data_type,
- &a->v.dirty_sectors, &a->v.cached_sectors) ?:
- bch2_trans_update(trans, &iter, &a->k_i, 0);
- bch2_trans_iter_exit(trans, &iter);
-
- if (ret)
- return ret;
+ goto err;
if (!p.ptr.cached) {
- ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
+ ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, k, insert);
if (ret)
- return ret;
+ goto err;
}
}
- if (flags & BTREE_TRIGGER_GC) {
- struct bch_fs *c = trans->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
- enum bch_data_type data_type = bkey_ptr_data_type(btree_id, level, k, p);
-
+ if (flags & BTREE_TRIGGER_gc) {
percpu_down_read(&c->mark_lock);
- struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
+ struct bucket *g = gc_bucket(ca, bucket.offset);
bucket_lock(g);
- struct bucket old = *g;
-
- u8 bucket_data_type = g->data_type;
- int ret = __mark_pointer(trans, k, &p.ptr, *sectors,
- data_type, g->gen,
- &bucket_data_type,
- &g->dirty_sectors,
- &g->cached_sectors);
- if (ret) {
- bucket_unlock(g);
- percpu_up_read(&c->mark_lock);
- return ret;
+ struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
+ ret = __mark_pointer(trans, ca, k, &p.ptr, *sectors, bp.data_type, &new);
+ if (!ret) {
+ alloc_to_bucket(g, new);
+ bch2_dev_usage_update(c, ca, &old, &new, 0, true);
}
-
- g->data_type = bucket_data_type;
- struct bucket new = *g;
bucket_unlock(g);
- bch2_dev_usage_update_m(c, ca, &old, &new);
percpu_up_read(&c->mark_lock);
}
-
- return 0;
+err:
+ bch2_dev_put(ca);
+ return ret;
}
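Aside: bch2_trigger_pointer() now follows the tryget/put discipline introduced by this series - acquire a counted device reference up front, funnel every exit through one label, and drop the reference there (bch2_dev_put() presumably tolerating NULL, since the error path reaches it when the lookup failed). The shape in miniature (illustrative names):

#include <stddef.h>
#include <errno.h>

struct dev { int refs; };

static struct dev *dev_tryget(struct dev *d) { if (d) d->refs++; return d; }
static void dev_put(struct dev *d)           { if (d) d->refs--; }	/* NULL-safe */

static int trigger(struct dev *candidate, int insert)
{
	int ret = 0;
	struct dev *ca = dev_tryget(candidate);

	if (!ca) {
		if (insert)
			ret = -EIO;
		goto err;		/* missing device: fatal only on insert */
	}

	/* ... do the marking work with ca pinned ... */
err:
	dev_put(ca);			/* single exit drops the ref on every path */
	return ret;
}

int main(void) { struct dev d = { 0 }; return trigger(&d, 1); }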
static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
struct bkey_s_c k,
struct extent_ptr_decoded p,
enum bch_data_type data_type,
- s64 sectors, unsigned flags)
+ s64 sectors,
+ enum btree_iter_update_trigger_flags flags)
{
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ if (flags & BTREE_TRIGGER_transactional) {
struct btree_iter iter;
struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_stripes, POS(0, p.ec.idx),
- BTREE_ITER_WITH_UPDATES, stripe);
+ BTREE_ITER_with_updates, stripe);
int ret = PTR_ERR_OR_ZERO(s);
if (unlikely(ret)) {
bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
@@ -920,10 +1074,10 @@ err:
return ret;
}
- if (flags & BTREE_TRIGGER_GC) {
+ if (flags & BTREE_TRIGGER_gc) {
struct bch_fs *c = trans->c;
- BUG_ON(!(flags & BTREE_TRIGGER_GC));
+ BUG_ON(!(flags & BTREE_TRIGGER_gc));
struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
if (!m) {
@@ -959,9 +1113,10 @@ err:
static int __trigger_extent(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, unsigned flags)
+ struct bkey_s_c k,
+ enum btree_iter_update_trigger_flags flags)
{
- bool gc = flags & BTREE_TRIGGER_GC;
+ bool gc = flags & BTREE_TRIGGER_gc;
struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
@@ -970,7 +1125,7 @@ static int __trigger_extent(struct btree_trans *trans,
enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
? BCH_DATA_btree
: BCH_DATA_user;
- s64 dirty_sectors = 0;
+ s64 replicas_sectors = 0;
int ret = 0;
r.e.data_type = data_type;
@@ -979,7 +1134,7 @@ static int __trigger_extent(struct btree_trans *trans,
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
s64 disk_sectors;
- ret = bch2_trigger_pointer(trans, btree_id, level, k, p, &disk_sectors, flags);
+ ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
if (ret < 0)
return ret;
@@ -996,7 +1151,7 @@ static int __trigger_extent(struct btree_trans *trans,
return ret;
}
} else if (!p.has_ec) {
- dirty_sectors += disk_sectors;
+ replicas_sectors += disk_sectors;
r.e.devs[r.e.nr_devs++] = p.ptr.dev;
} else {
ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
@@ -1014,8 +1169,8 @@ static int __trigger_extent(struct btree_trans *trans,
if (r.e.nr_devs) {
ret = !gc
- ? bch2_update_replicas_list(trans, &r.e, dirty_sectors)
- : bch2_update_replicas(c, k, &r.e, dirty_sectors, 0, true);
+ ? bch2_update_replicas_list(trans, &r.e, replicas_sectors)
+ : bch2_update_replicas(c, k, &r.e, replicas_sectors, 0, true);
if (unlikely(ret && gc)) {
struct printbuf buf = PRINTBUF;
@@ -1031,15 +1186,18 @@ static int __trigger_extent(struct btree_trans *trans,
}
int bch2_trigger_extent(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
+ enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;
+ if (unlikely(flags & BTREE_TRIGGER_check_repair))
+ return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);
+
/* if pointers aren't changing - nothing to do: */
if (new_ptrs_bytes == old_ptrs_bytes &&
!memcmp(new_ptrs.start,
@@ -1047,7 +1205,7 @@ int bch2_trigger_extent(struct btree_trans *trans,
new_ptrs_bytes))
return 0;
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ if (flags & BTREE_TRIGGER_transactional) {
struct bch_fs *c = trans->c;
int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
(int) bch2_bkey_needs_rebalance(c, old);
@@ -1060,8 +1218,8 @@ int bch2_trigger_extent(struct btree_trans *trans,
}
}
- if (flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))
- return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree_id, level, old, new, flags);
+ if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc))
+ return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree, level, old, new, flags);
return 0;
}
@@ -1069,17 +1227,17 @@ int bch2_trigger_extent(struct btree_trans *trans,
/* KEY_TYPE_reservation */
static int __trigger_reservation(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, unsigned flags)
+ enum btree_id btree_id, unsigned level, struct bkey_s_c k,
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size * replicas;
- if (flags & BTREE_TRIGGER_OVERWRITE)
+ if (flags & BTREE_TRIGGER_overwrite)
sectors = -sectors;
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ if (flags & BTREE_TRIGGER_transactional) {
int ret = bch2_replicas_deltas_realloc(trans, 0);
if (ret)
return ret;
@@ -1090,7 +1248,7 @@ static int __trigger_reservation(struct btree_trans *trans,
d->persistent_reserved[replicas - 1] += sectors;
}
- if (flags & BTREE_TRIGGER_GC) {
+ if (flags & BTREE_TRIGGER_gc) {
percpu_down_read(&c->mark_lock);
preempt_disable();
@@ -1110,7 +1268,7 @@ static int __trigger_reservation(struct btree_trans *trans,
int bch2_trigger_reservation(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}
@@ -1118,22 +1276,16 @@ int bch2_trigger_reservation(struct btree_trans *trans,
/* Mark superblocks: */
static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
- struct bch_dev *ca, size_t b,
+ struct bch_dev *ca, u64 b,
enum bch_data_type type,
unsigned sectors)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
- struct bkey_i_alloc_v4 *a;
int ret = 0;
- /*
- * Backup superblock might be past the end of our normal usable space:
- */
- if (b >= ca->mi.nbuckets)
- return 0;
-
- a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
+ struct bkey_i_alloc_v4 *a =
+ bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
if (IS_ERR(a))
return PTR_ERR(a);
@@ -1161,20 +1313,75 @@ err:
return ret;
}
+static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+ u64 b, enum bch_data_type data_type, unsigned sectors,
+ enum btree_iter_update_trigger_flags flags)
+{
+ int ret = 0;
+
+ percpu_down_read(&c->mark_lock);
+ struct bucket *g = gc_bucket(ca, b);
+
+ bucket_lock(g);
+ struct bch_alloc_v4 old = bucket_m_to_alloc(*g);
+
+ if (bch2_fs_inconsistent_on(g->data_type &&
+ g->data_type != data_type, c,
+ "different types of data in same bucket: %s, %s",
+ bch2_data_type_str(g->data_type),
+ bch2_data_type_str(data_type))) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
+ "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
+ ca->dev_idx, b, g->gen,
+ bch2_data_type_str(g->data_type ?: data_type),
+ g->dirty_sectors, sectors)) {
+ ret = -EIO;
+ goto err;
+ }
+
+ g->data_type = data_type;
+ g->dirty_sectors += sectors;
+ struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
+err:
+ bucket_unlock(g);
+ if (!ret)
+ bch2_dev_usage_update(c, ca, &old, &new, 0, true);
+ percpu_up_read(&c->mark_lock);
+ return ret;
+}
+
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
- struct bch_dev *ca, size_t b,
- enum bch_data_type type,
- unsigned sectors)
+ struct bch_dev *ca, u64 b,
+ enum bch_data_type type, unsigned sectors,
+ enum btree_iter_update_trigger_flags flags)
{
- return commit_do(trans, NULL, NULL, 0,
- __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
+ BUG_ON(type != BCH_DATA_free &&
+ type != BCH_DATA_sb &&
+ type != BCH_DATA_journal);
+
+ /*
+ * Backup superblock might be past the end of our normal usable space:
+ */
+ if (b >= ca->mi.nbuckets)
+ return 0;
+
+ if (flags & BTREE_TRIGGER_gc)
+ return bch2_mark_metadata_bucket(trans->c, ca, b, type, sectors, flags);
+ else if (flags & BTREE_TRIGGER_transactional)
+ return commit_do(trans, NULL, NULL, 0,
+ __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
+ else
+ BUG();
}
static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
- struct bch_dev *ca,
- u64 start, u64 end,
- enum bch_data_type type,
- u64 *bucket, unsigned *bucket_sectors)
+ struct bch_dev *ca, u64 start, u64 end,
+ enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
+ enum btree_iter_update_trigger_flags flags)
{
do {
u64 b = sector_to_bucket(ca, start);
@@ -1183,7 +1390,7 @@ static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
if (b != *bucket && *bucket_sectors) {
int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
- type, *bucket_sectors);
+ type, *bucket_sectors, flags);
if (ret)
return ret;
@@ -1198,8 +1405,8 @@ static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
return 0;
}
-static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
- struct bch_dev *ca)
+static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
u64 bucket = 0;
@@ -1212,21 +1419,21 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
if (offset == BCH_SB_SECTOR) {
ret = bch2_trans_mark_metadata_sectors(trans, ca,
0, BCH_SB_SECTOR,
- BCH_DATA_sb, &bucket, &bucket_sectors);
+ BCH_DATA_sb, &bucket, &bucket_sectors, flags);
if (ret)
return ret;
}
ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
offset + (1 << layout->sb_max_size_bits),
- BCH_DATA_sb, &bucket, &bucket_sectors);
+ BCH_DATA_sb, &bucket, &bucket_sectors, flags);
if (ret)
return ret;
}
if (bucket_sectors) {
ret = bch2_trans_mark_metadata_bucket(trans, ca,
- bucket, BCH_DATA_sb, bucket_sectors);
+ bucket, BCH_DATA_sb, bucket_sectors, flags);
if (ret)
return ret;
}
@@ -1234,7 +1441,7 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
for (i = 0; i < ca->journal.nr; i++) {
ret = bch2_trans_mark_metadata_bucket(trans, ca,
ca->journal.buckets[i],
- BCH_DATA_journal, ca->mi.bucket_size);
+ BCH_DATA_journal, ca->mi.bucket_size, flags);
if (ret)
return ret;
}
@@ -1242,20 +1449,22 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
return 0;
}
-int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
+int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
+ enum btree_iter_update_trigger_flags flags)
{
- int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));
-
+ int ret = bch2_trans_run(c,
+ __bch2_trans_mark_dev_sb(trans, ca, flags));
bch_err_fn(c, ret);
return ret;
}
-int bch2_trans_mark_dev_sbs(struct bch_fs *c)
+int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
+ enum btree_iter_update_trigger_flags flags)
{
for_each_online_member(c, ca) {
- int ret = bch2_trans_mark_dev_sb(c, ca);
+ int ret = bch2_trans_mark_dev_sb(c, ca, flags);
if (ret) {
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
}
@@ -1263,6 +1472,11 @@ int bch2_trans_mark_dev_sbs(struct bch_fs *c)
return 0;
}
+int bch2_trans_mark_dev_sbs(struct bch_fs *c)
+{
+ return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
+}
+
/* Disk reservations: */
#define SECTORS_CACHE 1024
@@ -1331,6 +1545,31 @@ recalculate:
/* Startup/shutdown: */
+void bch2_buckets_nouse_free(struct bch_fs *c)
+{
+ for_each_member_device(c, ca) {
+ kvfree_rcu_mightsleep(ca->buckets_nouse);
+ ca->buckets_nouse = NULL;
+ }
+}
+
+int bch2_buckets_nouse_alloc(struct bch_fs *c)
+{
+ for_each_member_device(c, ca) {
+ BUG_ON(ca->buckets_nouse);
+
+ ca->buckets_nouse = kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
+ sizeof(unsigned long),
+ GFP_KERNEL|__GFP_ZERO);
+ if (!ca->buckets_nouse) {
+ bch2_dev_put(ca);
+ return -BCH_ERR_ENOMEM_buckets_nouse;
+ }
+ }
+
+ return 0;
+}
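Aside: buckets_nouse is a plain bitmap with one bit per bucket, so the allocation above is BITS_TO_LONGS(nbuckets) longs, zeroed. Userspace equivalent of that sizing arithmetic, with calloc standing in for kvmalloc (names illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long long nbuckets = 1000;
	unsigned long *nouse = calloc(BITS_TO_LONGS(nbuckets), sizeof(unsigned long));

	if (!nouse)
		return 1;

	nouse[42 / BITS_PER_LONG] |= 1UL << (42 % BITS_PER_LONG);	/* mark bucket 42 */
	printf("%zu longs for %llu buckets\n",
	       (size_t) BITS_TO_LONGS(nbuckets), nbuckets);
	free(nouse);
	return 0;
}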
+
static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
struct bucket_gens *buckets =
@@ -1342,24 +1581,17 @@ static void bucket_gens_free_rcu(struct rcu_head *rcu)
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
- unsigned long *buckets_nouse = NULL;
bool resize = ca->bucket_gens != NULL;
int ret;
+ BUG_ON(resize && ca->buckets_nouse);
+
if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets,
GFP_KERNEL|__GFP_ZERO))) {
ret = -BCH_ERR_ENOMEM_bucket_gens;
goto err;
}
- if ((c->opts.buckets_nouse &&
- !(buckets_nouse = kvmalloc(BITS_TO_LONGS(nbuckets) *
- sizeof(unsigned long),
- GFP_KERNEL|__GFP_ZERO)))) {
- ret = -BCH_ERR_ENOMEM_buckets_nouse;
- goto err;
- }
-
bucket_gens->first_bucket = ca->mi.first_bucket;
bucket_gens->nbuckets = nbuckets;
@@ -1377,17 +1609,11 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
memcpy(bucket_gens->b,
old_bucket_gens->b,
n);
- if (buckets_nouse)
- memcpy(buckets_nouse,
- ca->buckets_nouse,
- BITS_TO_LONGS(n) * sizeof(unsigned long));
}
rcu_assign_pointer(ca->bucket_gens, bucket_gens);
bucket_gens = old_bucket_gens;
- swap(ca->buckets_nouse, buckets_nouse);
-
nbuckets = ca->mi.nbuckets;
if (resize) {
@@ -1398,7 +1624,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
ret = 0;
err:
- kvfree(buckets_nouse);
if (bucket_gens)
call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 6387e039f789..617ffde2fb7a 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -12,7 +12,7 @@
#include "extents.h"
#include "sb-members.h"
-static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
+static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
return div_u64(s, ca->mi.bucket_size);
}
@@ -30,8 +30,7 @@ static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
return remainder;
}
-static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
- u32 *offset)
+static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
return div_u64_rem(s, ca->mi.bucket_size, offset);
}
@@ -94,7 +93,7 @@ static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
struct bucket_array *buckets = gc_bucket_array(ca);
- BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
+ BUG_ON(!bucket_valid(ca, b));
return buckets->b + b;
}
@@ -111,7 +110,7 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
struct bucket_gens *gens = bucket_gens(ca);
- BUG_ON(b < gens->first_bucket || b >= gens->nbuckets);
+ BUG_ON(!bucket_valid(ca, b));
return gens->b + b;
}
@@ -121,20 +120,16 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
return sector_to_bucket(ca, ptr->offset);
}
-static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
- const struct bch_extent_ptr *ptr)
+static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
+ const struct bch_extent_ptr *ptr)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-
return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}
-static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
+static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
const struct bch_extent_ptr *ptr,
u32 *bucket_offset)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-
return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}
@@ -175,17 +170,19 @@ static inline int gen_after(u8 a, u8 b)
return r > 0 ? r : 0;
}
+static inline u8 dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
+{
+ return gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+}
+
/**
- * ptr_stale() - check if a pointer points into a bucket that has been
+ * dev_ptr_stale() - check if a pointer points into a bucket that has been
* invalidated.
*/
-static inline u8 ptr_stale(struct bch_dev *ca,
- const struct bch_extent_ptr *ptr)
+static inline u8 dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
- u8 ret;
-
rcu_read_lock();
- ret = gen_after(*bucket_gen(ca, PTR_BUCKET_NR(ca, ptr)), ptr->gen);
+ u8 ret = dev_ptr_stale_rcu(ca, ptr);
rcu_read_unlock();
return ret;
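Aside: dev_ptr_stale() reports how far a pointer's generation lags the bucket's current generation; generations are 8-bit and wrap, so the comparison goes through a signed difference rather than plain '<'. A standalone sketch of gen_after() - the signed cast is the part elided from the hunk context above, reconstructed here as an assumption:

#include <stdio.h>
#include <stdint.h>

static int gen_after(uint8_t a, uint8_t b)
{
	int r = (int8_t) (a - b);	/* circular 8-bit distance */

	return r > 0 ? r : 0;
}

int main(void)
{
	printf("%d\n", gen_after(1, 255));	/* 2: bucket gen wrapped past the ptr */
	printf("%d\n", gen_after(10, 10));	/* 0: same gen - pointer not stale */
	printf("%d\n", gen_after(10, 12));	/* 0: ptr ahead is not "stale" either */
	return 0;
}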
@@ -226,6 +223,7 @@ static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_waterma
fallthrough;
case BCH_WATERMARK_btree_copygc:
case BCH_WATERMARK_reclaim:
+ case BCH_WATERMARK_interior_updates:
break;
}
@@ -305,8 +303,6 @@ bch2_fs_usage_read_short(struct bch_fs *);
void bch2_dev_usage_update(struct bch_fs *, struct bch_dev *,
const struct bch_alloc_v4 *,
const struct bch_alloc_v4 *, u64, bool);
-void bch2_dev_usage_update_m(struct bch_fs *, struct bch_dev *,
- struct bucket *, struct bucket *);
/* key/bucket marking: */
@@ -332,27 +328,29 @@ int bch2_replicas_deltas_realloc(struct btree_trans *, unsigned);
void bch2_fs_usage_initialize(struct bch_fs *);
-int bch2_check_bucket_ref(struct btree_trans *, struct bkey_s_c,
- const struct bch_extent_ptr *,
- s64, enum bch_data_type, u8, u8, u32);
+int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
+ struct bkey_s_c, const struct bch_extent_ptr *,
+ s64, enum bch_data_type, u8, u8, u32 *);
-int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
- size_t, enum bch_data_type, unsigned,
- struct gc_pos, unsigned);
+int bch2_check_fix_ptrs(struct btree_trans *,
+ enum btree_id, unsigned, struct bkey_s_c,
+ enum btree_iter_update_trigger_flags);
int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({ \
int ret = 0; \
\
if (_old.k->type) \
- ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT); \
+ ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert); \
if (!ret && _new.k->type) \
- ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_OVERWRITE);\
+ ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
ret; \
})
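Aside: trigger_run_overwrite_then_insert() encodes the two-phase trigger convention - the callback runs once for the key being overwritten with the insert flag masked off, then once for the new key with the overwrite flag masked off, so each invocation sees exactly one direction. A minimal model of that control flow (flag values and callback illustrative):

#include <stdio.h>

#define TRIGGER_insert		(1 << 0)
#define TRIGGER_overwrite	(1 << 1)

static int trigger(const char *which, unsigned flags)
{
	printf("%s: insert=%d overwrite=%d\n", which,
	       !!(flags & TRIGGER_insert), !!(flags & TRIGGER_overwrite));
	return 0;
}

static int run_overwrite_then_insert(int old_type, int new_type, unsigned flags)
{
	int ret = 0;

	if (old_type)
		ret = trigger("old", flags & ~TRIGGER_insert);
	if (!ret && new_type)
		ret = trigger("new", flags & ~TRIGGER_overwrite);
	return ret;
}

int main(void)
{
	return run_overwrite_then_insert(1, 1, TRIGGER_insert|TRIGGER_overwrite);
}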
@@ -361,9 +359,13 @@ void bch2_trans_account_disk_usage_change(struct btree_trans *);
void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
-int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
- size_t, enum bch_data_type, unsigned);
-int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
+int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
+ enum bch_data_type, unsigned,
+ enum btree_iter_update_trigger_flags);
+int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
+ enum btree_iter_update_trigger_flags);
+int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
+ enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
@@ -394,14 +396,6 @@ static inline const char *bch2_data_type_str(enum bch_data_type type)
: "(invalid data type)";
}
-static inline void bch2_prt_data_type(struct printbuf *out, enum bch_data_type type)
-{
- if (type < BCH_DATA_NR)
- prt_str(out, __bch2_data_types[type]);
- else
- prt_printf(out, "(invalid data type %u)", type);
-}
-
/* disk reservations: */
static inline void bch2_disk_reservation_put(struct bch_fs *c,
@@ -471,6 +465,9 @@ static inline u64 avail_factor(u64 r)
return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}
+void bch2_buckets_nouse_free(struct bch_fs *);
+int bch2_buckets_nouse_alloc(struct bch_fs *);
+
int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 38defa19d52d..9e54323f0f5f 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -7,7 +7,7 @@
#include "chardev.h"
#include "journal.h"
#include "move.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
@@ -32,12 +32,7 @@ static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
if (dev >= c->sb.nr_devices)
return ERR_PTR(-EINVAL);
- rcu_read_lock();
- ca = rcu_dereference(c->devs[dev]);
- if (ca)
- percpu_ref_get(&ca->ref);
- rcu_read_unlock();
-
+ ca = bch2_dev_tryget_noerror(c, dev);
if (!ca)
return ERR_PTR(-EINVAL);
} else {
@@ -134,42 +129,38 @@ static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg
struct fsck_thread {
struct thread_with_stdio thr;
struct bch_fs *c;
- char **devs;
- size_t nr_devs;
struct bch_opts opts;
};
static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr)
{
struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr);
- if (thr->devs)
- for (size_t i = 0; i < thr->nr_devs; i++)
- kfree(thr->devs[i]);
- kfree(thr->devs);
kfree(thr);
}
static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
{
struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
- struct bch_fs *c = bch2_fs_open(thr->devs, thr->nr_devs, thr->opts);
-
- if (IS_ERR(c))
- return PTR_ERR(c);
+ struct bch_fs *c = thr->c;
- int ret = 0;
- if (test_bit(BCH_FS_errors_fixed, &c->flags))
- ret |= 1;
- if (test_bit(BCH_FS_error, &c->flags))
- ret |= 4;
+ int ret = PTR_ERR_OR_ZERO(c);
+ if (ret)
+ return ret;
- bch2_fs_stop(c);
+ ret = bch2_fs_start(thr->c);
+ if (ret)
+ goto err;
- if (ret & 1)
+ if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
- if (ret & 4)
+ ret |= 1;
+ }
+ if (test_bit(BCH_FS_error, &c->flags)) {
bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
-
+ ret |= 4;
+ }
+err:
+ bch2_fs_stop(c);
return ret;
}
@@ -182,7 +173,7 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
{
struct bch_ioctl_fsck_offline arg;
struct fsck_thread *thr = NULL;
- u64 *devs = NULL;
+ darray_str(devs) = {};
long ret = 0;
if (copy_from_user(&arg, user_arg, sizeof(arg)))
@@ -194,29 +185,32 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!(devs = kcalloc(arg.nr_devs, sizeof(*devs), GFP_KERNEL)) ||
- !(thr = kzalloc(sizeof(*thr), GFP_KERNEL)) ||
- !(thr->devs = kcalloc(arg.nr_devs, sizeof(*thr->devs), GFP_KERNEL))) {
- ret = -ENOMEM;
- goto err;
- }
+ for (size_t i = 0; i < arg.nr_devs; i++) {
+ u64 dev_u64;
+ ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));
+ if (ret)
+ goto err;
- thr->opts = bch2_opts_empty();
- thr->nr_devs = arg.nr_devs;
+ char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
+ ret = PTR_ERR_OR_ZERO(dev_str);
+ if (ret)
+ goto err;
- if (copy_from_user(devs, &user_arg->devs[0],
- array_size(sizeof(user_arg->devs[0]), arg.nr_devs))) {
- ret = -EINVAL;
- goto err;
+ ret = darray_push(&devs, dev_str);
+ if (ret) {
+ kfree(dev_str);
+ goto err;
+ }
}
- for (size_t i = 0; i < arg.nr_devs; i++) {
- thr->devs[i] = strndup_user((char __user *)(unsigned long) devs[i], PATH_MAX);
- ret = PTR_ERR_OR_ZERO(thr->devs[i]);
- if (ret)
- goto err;
+ thr = kzalloc(sizeof(*thr), GFP_KERNEL);
+ if (!thr) {
+ ret = -ENOMEM;
+ goto err;
}
+ thr->opts = bch2_opts_empty();
+
if (arg.opts) {
char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
@@ -230,15 +224,28 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);
- ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_offline_fsck_ops);
-err:
- if (ret < 0) {
- if (thr)
- bch2_fsck_thread_exit(&thr->thr);
- pr_err("ret %s", bch2_err_str(ret));
- }
- kfree(devs);
+ /* We need request_key() to be called before we punt to kthread: */
+ opt_set(thr->opts, nostart, true);
+
+ bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);
+
+ thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);
+
+ if (!IS_ERR(thr->c) &&
+ thr->c->opts.errors == BCH_ON_ERROR_panic)
+ thr->c->opts.errors = BCH_ON_ERROR_ro;
+
+ ret = __bch2_run_thread_with_stdio(&thr->thr);
+out:
+ darray_for_each(devs, i)
+ kfree(*i);
+ darray_exit(&devs);
return ret;
+err:
+ if (thr)
+ bch2_fsck_thread_exit(&thr->thr);
+ pr_err("ret %s", bch2_err_str(ret));
+ goto out;
}
static long bch2_global_ioctl(unsigned cmd, void __user *arg)
@@ -379,7 +386,7 @@ static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg)
return PTR_ERR(ca);
ret = bch2_dev_offline(c, ca, arg.flags);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
@@ -408,7 +415,7 @@ static long bch2_ioctl_disk_set_state(struct bch_fs *c,
if (ret)
bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
@@ -603,7 +610,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
arg.d[i].fragmented = src.d[i].fragmented;
}
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
}
@@ -655,7 +662,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
goto err;
}
err:
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
@@ -677,11 +684,9 @@ static long bch2_ioctl_read_super(struct bch_fs *c,
if (arg.flags & BCH_READ_DEV) {
ca = bch2_device_lookup(c, arg.dev, arg.flags);
-
- if (IS_ERR(ca)) {
- ret = PTR_ERR(ca);
- goto err;
- }
+ ret = PTR_ERR_OR_ZERO(ca);
+ if (ret)
+ goto err_unlock;
sb = ca->disk_sb.sb;
} else {
@@ -696,8 +701,8 @@ static long bch2_ioctl_read_super(struct bch_fs *c,
ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
vstruct_bytes(sb));
err:
- if (!IS_ERR_OR_NULL(ca))
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
+err_unlock:
mutex_unlock(&c->sb_lock);
return ret;
}
@@ -741,7 +746,7 @@ static long bch2_ioctl_disk_resize(struct bch_fs *c,
ret = bch2_dev_resize(c, ca, arg.nbuckets);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
@@ -767,7 +772,7 @@ static long bch2_ioctl_disk_resize_journal(struct bch_fs *c,
ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return ret;
}
@@ -949,7 +954,9 @@ static const struct file_operations bch_chardev_fops = {
};
static int bch_chardev_major;
-static struct class *bch_chardev_class;
+static const struct class bch_chardev_class = {
+ .name = "bcachefs",
+};
static struct device *bch_chardev;
void bch2_fs_chardev_exit(struct bch_fs *c)
@@ -966,7 +973,7 @@ int bch2_fs_chardev_init(struct bch_fs *c)
if (c->minor < 0)
return c->minor;
- c->chardev = device_create(bch_chardev_class, NULL,
+ c->chardev = device_create(&bch_chardev_class, NULL,
MKDEV(bch_chardev_major, c->minor), c,
"bcachefs%u-ctl", c->minor);
if (IS_ERR(c->chardev))
@@ -977,32 +984,39 @@ int bch2_fs_chardev_init(struct bch_fs *c)
void bch2_chardev_exit(void)
{
- if (!IS_ERR_OR_NULL(bch_chardev_class))
- device_destroy(bch_chardev_class,
- MKDEV(bch_chardev_major, U8_MAX));
- if (!IS_ERR_OR_NULL(bch_chardev_class))
- class_destroy(bch_chardev_class);
+ device_destroy(&bch_chardev_class, MKDEV(bch_chardev_major, U8_MAX));
+ class_unregister(&bch_chardev_class);
if (bch_chardev_major > 0)
unregister_chrdev(bch_chardev_major, "bcachefs");
}
int __init bch2_chardev_init(void)
{
+ int ret;
+
bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
if (bch_chardev_major < 0)
return bch_chardev_major;
- bch_chardev_class = class_create("bcachefs");
- if (IS_ERR(bch_chardev_class))
- return PTR_ERR(bch_chardev_class);
+ ret = class_register(&bch_chardev_class);
+ if (ret)
+ goto major_out;
- bch_chardev = device_create(bch_chardev_class, NULL,
+ bch_chardev = device_create(&bch_chardev_class, NULL,
MKDEV(bch_chardev_major, U8_MAX),
NULL, "bcachefs-ctl");
- if (IS_ERR(bch_chardev))
- return PTR_ERR(bch_chardev);
+ if (IS_ERR(bch_chardev)) {
+ ret = PTR_ERR(bch_chardev);
+ goto class_out;
+ }
return 0;
+
+class_out:
+ class_unregister(&bch_chardev_class);
+major_out:
+ unregister_chrdev(bch_chardev_major, "bcachefs-ctl");
+ return ret;
}
#endif /* NO_BCACHEFS_CHARDEV */
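
[annotation] The chardev rework above moves from a heap-allocated class (class_create()) to a statically defined const struct class that is registered and unregistered explicitly, and adds a goto unwind ladder so every setup step taken before a failure is undone in reverse order. A minimal sketch of the same pattern for a hypothetical driver — demo_fops and all names here are illustrative, not part of this patch:

	static const struct class demo_class = {
		.name	= "demo",
	};

	static int demo_major;
	static struct device *demo_device;

	static int __init demo_init(void)
	{
		int ret;

		/* step 1: reserve a char major */
		demo_major = register_chrdev(0, "demo-ctl", &demo_fops);
		if (demo_major < 0)
			return demo_major;

		/* step 2: register the statically defined class */
		ret = class_register(&demo_class);
		if (ret)
			goto err_chrdev;

		/* step 3: create the control device node */
		demo_device = device_create(&demo_class, NULL,
					    MKDEV(demo_major, 0), NULL, "demo-ctl");
		if (IS_ERR(demo_device)) {
			ret = PTR_ERR(demo_device);
			goto err_class;
		}

		return 0;

	err_class:
		class_unregister(&demo_class);
	err_chrdev:
		unregister_chrdev(demo_major, "demo-ctl");
		return ret;
	}

Note that the old bch2_chardev_init() leaked the chrdev major when class or device creation failed; unwinding in strict reverse order of acquisition closes that hole.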
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 4701457f6381..85198f391e9c 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -102,6 +102,7 @@ static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
int ret;
skcipher_request_set_sync_tfm(req, tfm);
+ skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
ret = crypto_skcipher_encrypt(req);
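
[annotation] The one-line addition above matters because an on-stack skcipher request is not guaranteed to be zeroed, so the callback/flags fields must be initialized even for purely synchronous use. A condensed sketch of the full request setup, assuming an already-allocated sync tfm (function name illustrative):

	static int demo_encrypt(struct crypto_sync_skcipher *tfm,
				struct scatterlist *sg, unsigned len, u8 *iv)
	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		/* no completion callback for a synchronous request, but
		 * the field must still be initialized: */
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, sg, sg, len, iv);

		return crypto_skcipher_encrypt(req);
	}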
@@ -429,15 +430,20 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
extent_nonce(version, crc_old), bio);
if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
- bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
- "expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
- __func__,
- crc_old.csum.hi,
- crc_old.csum.lo,
- merged.hi,
- merged.lo,
- bch2_csum_types[crc_old.csum_type],
- bch2_csum_types[new_csum_type]);
+ struct printbuf buf = PRINTBUF;
+ prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n"
+ "expected %0llx:%0llx got %0llx:%0llx (old type ",
+ __func__,
+ crc_old.csum.hi,
+ crc_old.csum.lo,
+ merged.hi,
+ merged.lo);
+ bch2_prt_csum_type(&buf, crc_old.csum_type);
+ prt_str(&buf, " new type ");
+ bch2_prt_csum_type(&buf, new_csum_type);
+ prt_str(&buf, ")");
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
return -EIO;
}
@@ -463,9 +469,8 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
/* BCH_SB_FIELD_crypt: */
-static int bch2_sb_crypt_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_crypt_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
@@ -488,14 +493,10 @@ static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
{
struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
- prt_printf(out, "KFD: %llu", BCH_CRYPT_KDF_TYPE(crypt));
- prt_newline(out);
- prt_printf(out, "scrypt n: %llu", BCH_KDF_SCRYPT_N(crypt));
- prt_newline(out);
- prt_printf(out, "scrypt r: %llu", BCH_KDF_SCRYPT_R(crypt));
- prt_newline(out);
- prt_printf(out, "scrypt p: %llu", BCH_KDF_SCRYPT_P(crypt));
- prt_newline(out);
+ prt_printf(out, "KDF: %llu\n", BCH_CRYPT_KDF_TYPE(crypt));
+ prt_printf(out, "scrypt n: %llu\n", BCH_KDF_SCRYPT_N(crypt));
+ prt_printf(out, "scrypt r: %llu\n", BCH_KDF_SCRYPT_R(crypt));
+ prt_printf(out, "scrypt p: %llu\n", BCH_KDF_SCRYPT_P(crypt));
}
const struct bch_sb_field_ops bch_sb_field_ops_crypt = {
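
[annotation] A recurring conversion in this patch: instead of one bch_err()/prt_newline() call per fragment, messages are accumulated in a printbuf and emitted once, and enum names are printed through bounds-checked helpers such as bch2_prt_csum_type() rather than by indexing a string array with a possibly corrupt on-disk value. The shape of the pattern, using the bcachefs printbuf API (old_type/new_type stand in for the real locals):

	struct printbuf buf = PRINTBUF;

	prt_str(&buf, "checksum error (old type ");
	bch2_prt_csum_type(&buf, old_type);	/* validates the enum before printing */
	prt_str(&buf, " new type ");
	bch2_prt_csum_type(&buf, new_type);
	prt_str(&buf, ")");

	bch_err(c, "%s", buf.buf);		/* one log line, not several */
	printbuf_exit(&buf);			/* frees the printbuf's heap buffer */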
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 1b8c2c1016dc..e40499fde9a4 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -61,11 +61,12 @@ static inline void bch2_csum_err_msg(struct printbuf *out,
struct bch_csum expected,
struct bch_csum got)
{
- prt_printf(out, "checksum error: got ");
+ prt_str(out, "checksum error, type ");
+ bch2_prt_csum_type(out, type);
+ prt_str(out, ": got ");
bch2_csum_to_text(out, type, got);
prt_str(out, " should be ");
bch2_csum_to_text(out, type, expected);
- prt_printf(out, " type %s", bch2_csum_types[type]);
}
int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h
index 58c2eb45570f..607fd5e232c9 100644
--- a/fs/bcachefs/compress.h
+++ b/fs/bcachefs/compress.h
@@ -47,14 +47,6 @@ static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
}
-static inline void bch2_prt_compression_type(struct printbuf *out, enum bch_compression_type type)
-{
- if (type < BCH_COMPRESSION_TYPE_NR)
- prt_str(out, __bch2_compression_types[type]);
- else
- prt_printf(out, "(invalid compression type %u)", type);
-}
-
int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
struct bch_extent_crc_unpacked *);
int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 4150feca42a2..0d807c2ce9c6 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -14,6 +14,7 @@
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
+#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"
@@ -105,7 +106,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, m->btree_id,
bkey_start_pos(&bch2_keylist_front(keys)->k),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ BTREE_ITER_slots|BTREE_ITER_intent);
while (1) {
struct bkey_s_c k;
@@ -202,6 +203,8 @@ restart_drop_conflicting_replicas:
/* Now, drop excess replicas: */
restart_drop_extra_replicas:
+
+ rcu_read_lock();
bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);
@@ -213,6 +216,7 @@ restart_drop_extra_replicas:
goto restart_drop_extra_replicas;
}
}
+ rcu_read_unlock();
/* Finally, add the pointers we just wrote: */
extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
@@ -287,7 +291,7 @@ restart_drop_extra_replicas:
k.k->p, insert->k.p) ?:
bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
bch2_trans_update(trans, &iter, insert,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, &op->res,
NULL,
BCH_TRANS_COMMIT_no_check_rw|
@@ -356,10 +360,11 @@ void bch2_data_update_exit(struct data_update *update)
bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
if (c->opts.nocow_enabled)
bch2_bucket_nocow_unlock(&c->nocow_locks,
- PTR_BUCKET_POS(c, ptr), 0);
- percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+ PTR_BUCKET_POS(ca, ptr), 0);
+ bch2_dev_put(ca);
}
bch2_bkey_buf_exit(&update->k, c);
@@ -385,8 +390,10 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
while (bio_sectors(bio)) {
unsigned sectors = bio_sectors(bio);
+ bch2_trans_begin(trans);
+
bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
- BTREE_ITER_SLOTS);
+ BTREE_ITER_slots);
ret = lockrestart_do(trans, ({
k = bch2_btree_iter_peek_slot(&iter);
bkey_err(k);
@@ -464,7 +471,6 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans,
while (data_opts.kill_ptrs) {
unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
- struct bch_extent_ptr *ptr;
bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
data_opts.kill_ptrs ^= 1U << drop;
@@ -479,15 +485,15 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans,
/*
* Since we're not inserting through an extent iterator
- * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
+ * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
* we aren't using the extent overwrite path to delete, we're
* just using the normal key deletion path:
*/
- if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+ if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
n->k.size = 0;
return bch2_trans_relock(trans) ?:
- bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}
@@ -509,6 +515,14 @@ int bch2_data_update_init(struct btree_trans *trans,
unsigned ptrs_locked = 0;
int ret = 0;
+ /*
+ * fs is corrupt: we have a key for a snapshot node that doesn't exist,
+ * and we have to check for this because we go rw before repairing the
+ * snapshots table - just skip it, we can move it later.
+ */
+ if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
+ return -BCH_ERR_data_update_done;
+
bch2_bkey_buf_init(&m->k);
bch2_bkey_buf_reassemble(&m->k, c, k);
m->btree_id = btree_id;
@@ -530,15 +544,26 @@ int bch2_data_update_init(struct btree_trans *trans,
m->op.compression_opt = background_compression(io_opts);
m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
- bkey_for_each_ptr(ptrs, ptr)
- percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
+ bkey_for_each_ptr(ptrs, ptr) {
+ if (!bch2_dev_tryget(c, ptr->dev)) {
+ bkey_for_each_ptr(ptrs, ptr2) {
+ if (ptr2 == ptr)
+ break;
+ bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
+ }
+ return -BCH_ERR_data_update_done;
+ }
+ }
unsigned durability_have = 0, durability_removing = 0;
i = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
+ struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
bool locked;
+ rcu_read_lock();
if (((1U << i) & m->data_opts.rewrite_ptrs)) {
BUG_ON(p.ptr.cached);
@@ -552,6 +577,7 @@ int bch2_data_update_init(struct btree_trans *trans,
bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
durability_have += bch2_extent_ptr_durability(c, &p);
}
+ rcu_read_unlock();
/*
* op->csum_type is normally initialized from the fs/file's
@@ -570,16 +596,13 @@ int bch2_data_update_init(struct btree_trans *trans,
if (ctxt) {
move_ctxt_wait_event(ctxt,
(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
- PTR_BUCKET_POS(c, &p.ptr), 0)) ||
- (!atomic_read(&ctxt->read_sectors) &&
- !atomic_read(&ctxt->write_sectors)));
+ bucket, 0)) ||
+ list_empty(&ctxt->ios));
if (!locked)
- bch2_bucket_nocow_lock(&c->nocow_locks,
- PTR_BUCKET_POS(c, &p.ptr), 0);
+ bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
} else {
- if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
- PTR_BUCKET_POS(c, &p.ptr), 0)) {
+ if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
ret = -BCH_ERR_nocow_lock_blocked;
goto err;
}
@@ -590,6 +613,8 @@ int bch2_data_update_init(struct btree_trans *trans,
i++;
}
+ unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));
+
/*
* If current extent durability is less than io_opts.data_replicas,
* we're not trying to rereplicate the extent up to data_replicas here -
@@ -599,7 +624,7 @@ int bch2_data_update_init(struct btree_trans *trans,
* rereplicate, currently, so that users don't get an unexpected -ENOSPC
*/
if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) &&
- durability_have >= io_opts.data_replicas) {
+ !durability_required) {
m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
m->data_opts.rewrite_ptrs = 0;
/* if iter == NULL, it's just a promote */
@@ -608,11 +633,18 @@ int bch2_data_update_init(struct btree_trans *trans,
goto done;
}
- m->op.nr_replicas = min(durability_removing, io_opts.data_replicas - durability_have) +
+ m->op.nr_replicas = min(durability_removing, durability_required) +
m->data_opts.extra_replicas;
- m->op.nr_replicas_required = m->op.nr_replicas;
- BUG_ON(!m->op.nr_replicas);
+ /*
+ * If device(s) were set to durability=0 after data was written to them,
+ * we can end up with a durability=0 extent, and the normal algorithm
+ * that tries not to increase durability doesn't work:
+ */
+ if (!(durability_have + durability_removing))
+ m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);
+
+ m->op.nr_replicas_required = m->op.nr_replicas;
if (reserve_sectors) {
ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
@@ -632,10 +664,11 @@ int bch2_data_update_init(struct btree_trans *trans,
err:
i = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
+ struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
if ((1U << i) & ptrs_locked)
- bch2_bucket_nocow_unlock(&c->nocow_locks,
- PTR_BUCKET_POS(c, &p.ptr), 0);
- percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
+ bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
+ bch2_dev_put(ca);
i++;
}
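
[annotation] The ref-counting change in bch2_data_update_init() deserves a note: bch2_dev_tryget() can fail for a device that no longer exists, so the loop that pins every pointer's device must, on failure, release exactly the refs it already took. Distilled into a standalone helper for illustration (the patch inlines this):

	static int data_update_get_dev_refs(struct bch_fs *c, struct bkey_ptrs_c ptrs)
	{
		bkey_for_each_ptr(ptrs, ptr) {
			if (!bch2_dev_tryget(c, ptr->dev)) {
				/* unwind: drop refs taken for pointers before @ptr */
				bkey_for_each_ptr(ptrs, ptr2) {
					if (ptr2 == ptr)
						break;
					bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
				}
				return -BCH_ERR_data_update_done;
			}
		}
		return 0;
	}

bch2_dev_have_ref() is the lookup used when a ref is known to be held, so the unwind path cannot itself fail.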
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 208ce6f0fc43..51cbf3928361 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -13,6 +13,7 @@
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
+#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "error.h"
@@ -36,11 +37,11 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
struct btree_node *n_ondisk = c->verify_ondisk;
struct btree_node *n_sorted = c->verify_data->data;
struct bset *sorted, *inmemory = &b->data->keys;
- struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
struct bio *bio;
bool failed = false, saw_error = false;
- if (!bch2_dev_get_ioref(ca, READ))
+ struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+ if (!ca)
return false;
bio = bio_alloc_bioset(ca->disk_sb.bdev,
@@ -193,8 +194,8 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
return;
}
- ca = bch_dev_bkey_exists(c, pick.ptr.dev);
- if (!bch2_dev_get_ioref(ca, READ)) {
+ ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+ if (!ca) {
prt_printf(out, "error getting device to read from: not online\n");
return;
}
@@ -374,8 +375,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
return flush_buf(i) ?:
bch2_trans_run(i->c,
for_each_btree_key(trans, iter, i->id, i->from,
- BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS, k, ({
+ BTREE_ITER_prefetch|
+ BTREE_ITER_all_snapshots, k, ({
bch2_bkey_val_to_text(&i->buf, i->c, k);
prt_newline(&i->buf);
bch2_trans_unlock(trans);
@@ -458,8 +459,8 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
return flush_buf(i) ?:
bch2_trans_run(i->c,
for_each_btree_key(trans, iter, i->id, i->from,
- BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS, k, ({
+ BTREE_ITER_prefetch|
+ BTREE_ITER_all_snapshots, k, ({
struct btree_path_level *l =
&btree_iter_path(trans, &iter)->l[0];
struct bkey_packed *_k =
@@ -491,51 +492,26 @@ static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 32);
- prt_printf(out, "%px btree=%s l=%u ",
- b,
- bch2_btree_id_str(b->c.btree_id),
- b->c.level);
- prt_newline(out);
+ prt_printf(out, "%px btree=%s l=%u\n", b, bch2_btree_id_str(b->c.btree_id), b->c.level);
printbuf_indent_add(out, 2);
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
prt_newline(out);
- prt_printf(out, "flags: ");
- prt_tab(out);
+ prt_printf(out, "flags:\t");
prt_bitflags(out, bch2_btree_node_flags, b->flags);
prt_newline(out);
- prt_printf(out, "pcpu read locks: ");
- prt_tab(out);
- prt_printf(out, "%u", b->c.lock.readers != NULL);
- prt_newline(out);
-
- prt_printf(out, "written:");
- prt_tab(out);
- prt_printf(out, "%u", b->written);
- prt_newline(out);
-
- prt_printf(out, "writes blocked:");
- prt_tab(out);
- prt_printf(out, "%u", !list_empty_careful(&b->write_blocked));
- prt_newline(out);
+ prt_printf(out, "pcpu read locks:\t%u\n", b->c.lock.readers != NULL);
+ prt_printf(out, "written:\t%u\n", b->written);
+ prt_printf(out, "writes blocked:\t%u\n", !list_empty_careful(&b->write_blocked));
+ prt_printf(out, "will make reachable:\t%lx\n", b->will_make_reachable);
- prt_printf(out, "will make reachable:");
- prt_tab(out);
- prt_printf(out, "%lx", b->will_make_reachable);
- prt_newline(out);
-
- prt_printf(out, "journal pin %px:", &b->writes[0].journal);
- prt_tab(out);
- prt_printf(out, "%llu", b->writes[0].journal.seq);
- prt_newline(out);
-
- prt_printf(out, "journal pin %px:", &b->writes[1].journal);
- prt_tab(out);
- prt_printf(out, "%llu", b->writes[1].journal.seq);
- prt_newline(out);
+ prt_printf(out, "journal pin %px:\t%llu\n",
+ &b->writes[0].journal, b->writes[0].journal.seq);
+ prt_printf(out, "journal pin %px:\t%llu\n",
+ &b->writes[1].journal, b->writes[1].journal.seq);
printbuf_indent_sub(out, 2);
}
@@ -624,8 +600,7 @@ restart:
bch2_btree_trans_to_text(&i->buf, trans);
- prt_printf(&i->buf, "backtrace:");
- prt_newline(&i->buf);
+ prt_printf(&i->buf, "backtrace:\n");
printbuf_indent_add(&i->buf, 2);
bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
printbuf_indent_sub(&i->buf, 2);
@@ -668,7 +643,7 @@ static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
i->size = size;
i->ret = 0;
- do {
+ while (1) {
err = flush_buf(i);
if (err)
return err;
@@ -676,9 +651,12 @@ static ssize_t bch2_journal_pins_read(struct file *file, char __user *buf,
if (!i->size)
break;
+ if (done)
+ break;
+
done = bch2_journal_seq_pins_to_text(&i->buf, &c->journal, &i->iter);
i->iter++;
- } while (!done);
+ }
if (i->buf.allocation_failure)
return -ENOMEM;
@@ -693,13 +671,45 @@ static const struct file_operations journal_pins_ops = {
.read = bch2_journal_pins_read,
};
+static ssize_t bch2_btree_updates_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct dump_iter *i = file->private_data;
+ struct bch_fs *c = i->c;
+ int err;
+
+ i->ubuf = buf;
+ i->size = size;
+ i->ret = 0;
+
+ if (!i->iter) {
+ bch2_btree_updates_to_text(&i->buf, c);
+ i->iter++;
+ }
+
+ err = flush_buf(i);
+ if (err)
+ return err;
+
+ if (i->buf.allocation_failure)
+ return -ENOMEM;
+
+ return i->ret;
+}
+
+static const struct file_operations btree_updates_ops = {
+ .owner = THIS_MODULE,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_btree_updates_read,
+};
+
static int btree_transaction_stats_open(struct inode *inode, struct file *file)
{
struct bch_fs *c = inode->i_private;
struct dump_iter *i;
i = kzalloc(sizeof(struct dump_iter), GFP_KERNEL);
-
if (!i)
return -ENOMEM;
@@ -746,25 +756,20 @@ static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf,
!bch2_btree_transaction_fns[i->iter])
break;
- prt_printf(&i->buf, "%s: ", bch2_btree_transaction_fns[i->iter]);
- prt_newline(&i->buf);
+ prt_printf(&i->buf, "%s:\n", bch2_btree_transaction_fns[i->iter]);
printbuf_indent_add(&i->buf, 2);
mutex_lock(&s->lock);
- prt_printf(&i->buf, "Max mem used: %u", s->max_mem);
- prt_newline(&i->buf);
-
- prt_printf(&i->buf, "Transaction duration:");
- prt_newline(&i->buf);
+ prt_printf(&i->buf, "Max mem used: %u\n", s->max_mem);
+ prt_printf(&i->buf, "Transaction duration:\n");
printbuf_indent_add(&i->buf, 2);
bch2_time_stats_to_text(&i->buf, &s->duration);
printbuf_indent_sub(&i->buf, 2);
if (IS_ENABLED(CONFIG_BCACHEFS_LOCK_TIME_STATS)) {
- prt_printf(&i->buf, "Lock hold times:");
- prt_newline(&i->buf);
+ prt_printf(&i->buf, "Lock hold times:\n");
printbuf_indent_add(&i->buf, 2);
bch2_time_stats_to_text(&i->buf, &s->lock_hold_times);
@@ -772,8 +777,7 @@ static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf,
}
if (s->max_paths_text) {
- prt_printf(&i->buf, "Maximum allocated btree paths (%u):", s->nr_max_paths);
- prt_newline(&i->buf);
+ prt_printf(&i->buf, "Maximum allocated btree paths (%u):\n", s->nr_max_paths);
printbuf_indent_add(&i->buf, 2);
prt_str_indented(&i->buf, s->max_paths_text);
@@ -866,6 +870,20 @@ void bch2_fs_debug_exit(struct bch_fs *c)
debugfs_remove_recursive(c->fs_debug_dir);
}
+static void bch2_fs_debug_btree_init(struct bch_fs *c, struct btree_debug *bd)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir(bch2_btree_id_str(bd->id), c->btree_debug_dir);
+
+ debugfs_create_file("keys", 0400, d, bd, &btree_debug_ops);
+
+ debugfs_create_file("formats", 0400, d, bd, &btree_format_debug_ops);
+
+ debugfs_create_file("bfloat-failed", 0400, d, bd,
+ &bfloat_failed_debug_ops);
+}
+
void bch2_fs_debug_init(struct bch_fs *c)
{
struct btree_debug *bd;
@@ -888,6 +906,9 @@ void bch2_fs_debug_init(struct bch_fs *c)
debugfs_create_file("journal_pins", 0400, c->fs_debug_dir,
c->btree_debug, &journal_pins_ops);
+ debugfs_create_file("btree_updates", 0400, c->fs_debug_dir,
+ c->btree_debug, &btree_updates_ops);
+
debugfs_create_file("btree_transaction_stats", 0400, c->fs_debug_dir,
c, &btree_transaction_stats_op);
@@ -902,21 +923,7 @@ void bch2_fs_debug_init(struct bch_fs *c)
bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
bd++) {
bd->id = bd - c->btree_debug;
- debugfs_create_file(bch2_btree_id_str(bd->id),
- 0400, c->btree_debug_dir, bd,
- &btree_debug_ops);
-
- snprintf(name, sizeof(name), "%s-formats",
- bch2_btree_id_str(bd->id));
-
- debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
- &btree_format_debug_ops);
-
- snprintf(name, sizeof(name), "%s-bfloat-failed",
- bch2_btree_id_str(bd->id));
-
- debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
- &bfloat_failed_debug_ops);
+ bch2_fs_debug_btree_init(c, bd);
}
}
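
[annotation] The debugfs refactor replaces three flat files per btree ("<btree>", "<btree>-formats", "<btree>-bfloat-failed") with one directory per btree containing fixed-name files, which keeps the top-level directory readable as btrees are added. The general shape, using only the stock debugfs API (names illustrative):

	/* one subdirectory per object, fixed file names inside */
	struct dentry *d = debugfs_create_dir(name, parent_dir);

	debugfs_create_file("keys",          0400, d, priv, &keys_fops);
	debugfs_create_file("formats",       0400, d, priv, &formats_fops);
	debugfs_create_file("bfloat-failed", 0400, d, priv, &bfloat_fops);

The return values are deliberately not checked; debugfs is designed so callers can ignore errors and the files degrade to no-ops.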
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index d37bd07afbfe..6bbf9a7d9e4d 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -98,7 +98,7 @@ const struct bch_hash_desc bch2_dirent_hash_desc = {
};
int bch2_dirent_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
@@ -118,7 +118,7 @@ int bch2_dirent_invalid(struct bch_fs *c, struct bkey_s_c k,
* Check new keys don't exceed the max length
* (older keys may be larger.)
*/
- bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) && d_name.len > BCH_NAME_MAX, c, err,
+ bkey_fsck_err_on((flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX, c, err,
dirent_name_too_long,
"dirent name too big (%u > %u)",
d_name.len, BCH_NAME_MAX);
@@ -205,7 +205,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans,
const struct bch_hash_info *hash_info,
u8 type, const struct qstr *name, u64 dst_inum,
u64 *dir_offset,
- bch_str_hash_flags_t str_hash_flags)
+ enum btree_iter_update_trigger_flags flags)
{
subvol_inum dir_inum = { .subvol = dir_subvol, .inum = dir };
struct bkey_i_dirent *dirent;
@@ -220,9 +220,8 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans,
dirent->k.p.snapshot = snapshot;
ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
- dir_inum, snapshot,
- &dirent->k_i, str_hash_flags,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ dir_inum, snapshot, &dirent->k_i,
+ flags|BTREE_UPDATE_internal_snapshot_node);
*dir_offset = dirent->k.p.offset;
return ret;
@@ -232,7 +231,7 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
const struct bch_hash_info *hash_info,
u8 type, const struct qstr *name, u64 dst_inum,
u64 *dir_offset,
- bch_str_hash_flags_t str_hash_flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bkey_i_dirent *dirent;
int ret;
@@ -243,7 +242,7 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
return ret;
ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
- dir, &dirent->k_i, str_hash_flags);
+ dir, &dirent->k_i, flags);
*dir_offset = dirent->k.p.offset;
return ret;
@@ -272,7 +271,7 @@ int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
} else {
target->subvol = le32_to_cpu(d.v->d_child_subvol);
- ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_CACHED, &s);
+ ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_cached, &s);
target->inum = le64_to_cpu(s.inode);
}
@@ -301,13 +300,9 @@ int bch2_dirent_rename(struct btree_trans *trans,
memset(dst_inum, 0, sizeof(*dst_inum));
/* Lookup src: */
- ret = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
- src_hash, src_dir, src_name,
- BTREE_ITER_INTENT);
- if (ret)
- goto out;
-
- old_src = bch2_btree_iter_peek_slot(&src_iter);
+ old_src = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
+ src_hash, src_dir, src_name,
+ BTREE_ITER_intent);
ret = bkey_err(old_src);
if (ret)
goto out;
@@ -329,13 +324,9 @@ int bch2_dirent_rename(struct btree_trans *trans,
if (ret)
goto out;
} else {
- ret = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
- dst_hash, dst_dir, dst_name,
- BTREE_ITER_INTENT);
- if (ret)
- goto out;
-
- old_dst = bch2_btree_iter_peek_slot(&dst_iter);
+ old_dst = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
+ dst_hash, dst_dir, dst_name,
+ BTREE_ITER_intent);
ret = bkey_err(old_dst);
if (ret)
goto out;
@@ -450,7 +441,7 @@ out_set_src:
if (delete_src) {
bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
ret = bch2_btree_iter_traverse(&src_iter) ?:
- bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
}
@@ -458,7 +449,7 @@ out_set_src:
if (delete_dst) {
bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
ret = bch2_btree_iter_traverse(&dst_iter) ?:
- bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
}
@@ -479,13 +470,9 @@ int bch2_dirent_lookup_trans(struct btree_trans *trans,
const struct qstr *name, subvol_inum *inum,
unsigned flags)
{
- int ret = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
- hash_info, dir, name, flags);
- if (ret)
- return ret;
-
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
+ struct bkey_s_c k = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
+ hash_info, dir, name, flags);
+ int ret = bkey_err(k);
if (ret)
goto err;
@@ -541,16 +528,26 @@ int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
bch2_empty_dir_snapshot(trans, dir.inum, dir.subvol, snapshot);
}
+static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subvol_inum target)
+{
+ struct qstr name = bch2_dirent_get_name(d);
+ bool ret = dir_emit(ctx, name.name,
+ name.len,
+ target.inum,
+ vfs_d_type(d.v->d_type));
+ if (ret)
+ ctx->pos = d.k->p.offset + 1;
+ return !ret;
+}
+
int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
- struct bkey_s_c_dirent dirent;
subvol_inum target;
u32 snapshot;
struct bkey_buf sk;
- struct qstr name;
int ret;
bch2_bkey_buf_init(&sk);
@@ -567,7 +564,9 @@ retry:
if (k.k->type != KEY_TYPE_dirent)
continue;
- dirent = bkey_s_c_to_dirent(k);
+ /* dir_emit() can fault and block: */
+ bch2_bkey_buf_reassemble(&sk, c, k);
+ struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);
ret = bch2_dirent_read_target(trans, inum, dirent, &target);
if (ret < 0)
@@ -575,28 +574,22 @@ retry:
if (ret)
continue;
- /* dir_emit() can fault and block: */
- bch2_bkey_buf_reassemble(&sk, c, k);
- dirent = bkey_i_to_s_c_dirent(sk.k);
- bch2_trans_unlock(trans);
-
- name = bch2_dirent_get_name(dirent);
-
- ctx->pos = dirent.k->p.offset;
- if (!dir_emit(ctx, name.name,
- name.len,
- target.inum,
- vfs_d_type(dirent.v->d_type)))
- break;
- ctx->pos = dirent.k->p.offset + 1;
-
/*
* read_target looks up subvolumes, we can overflow paths if the
* directory has many subvolumes in it
+ *
+ * XXX: btree_trans_too_many_iters() is something we'd like to
+ * get rid of, and there's no good reason to be using it here
+ * except that we don't yet have a for_each_btree_key() helper
+ * that does subvolume_get_snapshot().
*/
- ret = btree_trans_too_many_iters(trans);
- if (ret)
+ ret = drop_locks_do(trans,
+ bch2_dir_emit(ctx, dirent, target)) ?:
+ btree_trans_too_many_iters(trans);
+ if (ret) {
+ ret = ret < 0 ? ret : 0;
break;
+ }
}
bch2_trans_iter_exit(trans, &iter);
err:
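
[annotation] The readdir change is subtle: dir_emit() copies into a userspace buffer and can fault, so it must not be called with btree node locks held. The rewritten loop therefore snapshots the dirent into a private bkey_buf first, then emits it via drop_locks_do(), which (roughly) unlocks the transaction around the expression and relocks afterwards, returning a restart error if the relock fails. The core of the pattern, extracted from the hunk above:

	/* dir_emit() can fault: copy the key out first */
	bch2_bkey_buf_reassemble(&sk, c, k);
	struct bkey_s_c_dirent d = bkey_i_to_s_c_dirent(sk.k);

	ret = drop_locks_do(trans, bch2_dir_emit(ctx, d, target));
	/* nonzero means stop (buffer full or error); bch2_dir_emit()
	 * advances ctx->pos only when the entry was accepted */

Note bch2_dir_emit() must return nonzero only when iteration should stop; dir_emit() itself returns true on success, hence the inverted return.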
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
index bee55cca2aa0..24037e6e0a09 100644
--- a/fs/bcachefs/dirent.h
+++ b/fs/bcachefs/dirent.h
@@ -4,11 +4,11 @@
#include "str_hash.h"
-enum bkey_invalid_flags;
+enum bch_validate_flags;
extern const struct bch_hash_desc bch2_dirent_hash_desc;
int bch2_dirent_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_dirent ((struct bkey_ops) { \
@@ -38,11 +38,11 @@ int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
int bch2_dirent_create_snapshot(struct btree_trans *, u32, u64, u32,
const struct bch_hash_info *, u8,
const struct qstr *, u64, u64 *,
- bch_str_hash_flags_t);
+ enum btree_iter_update_trigger_flags);
int bch2_dirent_create(struct btree_trans *, subvol_inum,
const struct bch_hash_info *, u8,
const struct qstr *, u64, u64 *,
- bch_str_hash_flags_t);
+ enum btree_iter_update_trigger_flags);
static inline unsigned vfs_d_type(unsigned type)
{
diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
index 06a7df529b40..521a86df5e52 100644
--- a/fs/bcachefs/disk_groups.c
+++ b/fs/bcachefs/disk_groups.c
@@ -18,9 +18,8 @@ static int group_cmp(const void *_l, const void *_r)
strncmp(l->label, r->label, sizeof(l->label));
}
-static int bch2_sb_disk_groups_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_disk_groups_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_disk_groups *groups =
field_to_type(f, disk_groups);
@@ -177,7 +176,7 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, i);
struct bch_disk_group_cpu *dst;
- if (!bch2_member_exists(&m))
+ if (!bch2_member_alive(&m))
continue;
g = BCH_MEMBER_GROUP(&m);
@@ -523,7 +522,7 @@ int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res,
ca = bch2_dev_lookup(c, val);
if (!IS_ERR(ca)) {
*res = dev_to_target(ca->dev_idx);
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
return 0;
}
@@ -588,7 +587,7 @@ static void bch2_target_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsi
case TARGET_DEV: {
struct bch_member m = bch2_sb_member_get(sb, t.dev);
- if (bch2_dev_exists(sb, t.dev)) {
+ if (bch2_member_exists(sb, t.dev)) {
prt_printf(out, "Device ");
pr_uuid(out, m.uuid.b);
prt_printf(out, " (%u)", t.dev);
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 082075244e16..b26dc7424662 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -107,7 +107,7 @@ struct ec_bio {
/* Stripes btree keys: */
int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
@@ -131,174 +131,221 @@ fsck_err:
void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
- const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
- unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
+ const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
+ struct bch_stripe s = {};
+
+ memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));
+
+ unsigned nr_data = s.nr_blocks - s.nr_redundant;
- prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
- s->algorithm,
- le16_to_cpu(s->sectors),
- nr_data,
- s->nr_redundant,
- s->csum_type,
- 1U << s->csum_granularity_bits);
+ prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
+ s.algorithm,
+ le16_to_cpu(s.sectors),
+ nr_data,
+ s.nr_redundant);
+ bch2_prt_csum_type(out, s.csum_type);
+ prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);
+
+ for (unsigned i = 0; i < s.nr_blocks; i++) {
+ const struct bch_extent_ptr *ptr = sp->ptrs + i;
+
+ if ((void *) ptr >= bkey_val_end(k))
+ break;
- for (i = 0; i < s->nr_blocks; i++) {
- const struct bch_extent_ptr *ptr = s->ptrs + i;
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- u32 offset;
- u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
+ bch2_extent_ptr_to_text(out, c, ptr);
- prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
- if (i < nr_data)
- prt_printf(out, "#%u", stripe_blockcount_get(s, i));
- prt_printf(out, " gen %u", ptr->gen);
- if (ptr_stale(ca, ptr))
- prt_printf(out, " stale");
+ if (s.csum_type < BCH_CSUM_NR &&
+ i < nr_data &&
+ stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
+ prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
}
}
/* Triggers: */
-static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
- struct bkey_s_c_stripe s,
- unsigned idx, bool deleting)
+static int __mark_stripe_bucket(struct btree_trans *trans,
+ struct bch_dev *ca,
+ struct bkey_s_c_stripe s,
+ unsigned ptr_idx, bool deleting,
+ struct bpos bucket,
+ struct bch_alloc_v4 *a,
+ enum btree_iter_update_trigger_flags flags)
{
- struct bch_fs *c = trans->c;
- const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
- struct btree_iter iter;
- struct bkey_i_alloc_v4 *a;
- enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
- ? BCH_DATA_parity : 0;
- s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
+ const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
+ unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant;
+ bool parity = ptr_idx >= nr_data;
+ enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
+ s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
+ struct bch_fs *c = trans->c;
if (deleting)
sectors = -sectors;
- a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
- if (IS_ERR(a))
- return PTR_ERR(a);
-
- ret = bch2_check_bucket_ref(trans, s.s_c, ptr, sectors, data_type,
- a->v.gen, a->v.data_type,
- a->v.dirty_sectors);
- if (ret)
- goto err;
-
if (!deleting) {
- if (bch2_trans_inconsistent_on(a->v.stripe ||
- a->v.stripe_redundancy, trans,
- "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
- iter.pos.inode, iter.pos.offset, a->v.gen,
- bch2_data_type_str(a->v.data_type),
- a->v.dirty_sectors,
- a->v.stripe, s.k->p.offset)) {
+ if (bch2_trans_inconsistent_on(a->stripe ||
+ a->stripe_redundancy, trans,
+ "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
+ bucket.inode, bucket.offset, a->gen,
+ bch2_data_type_str(a->data_type),
+ a->dirty_sectors,
+ a->stripe, s.k->p.offset,
+ (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -EIO;
goto err;
}
- if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
- "bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
- iter.pos.inode, iter.pos.offset, a->v.gen,
- bch2_data_type_str(a->v.data_type),
- a->v.dirty_sectors,
- s.k->p.offset)) {
+ if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans,
+ "bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
+ bucket.inode, bucket.offset, a->gen,
+ bch2_data_type_str(a->data_type),
+ a->dirty_sectors,
+ a->cached_sectors,
+ (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -EIO;
goto err;
}
-
- a->v.stripe = s.k->p.offset;
- a->v.stripe_redundancy = s.v->nr_redundant;
- a->v.data_type = BCH_DATA_stripe;
} else {
- if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
- a->v.stripe_redundancy != s.v->nr_redundant, trans,
- "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
- iter.pos.inode, iter.pos.offset, a->v.gen,
- s.k->p.offset, a->v.stripe)) {
+ if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset ||
+ a->stripe_redundancy != s.v->nr_redundant, trans,
+ "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
+ bucket.inode, bucket.offset, a->gen,
+ a->stripe,
+ (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -EIO;
goto err;
}
- a->v.stripe = 0;
- a->v.stripe_redundancy = 0;
- a->v.data_type = alloc_data_type(a->v, BCH_DATA_user);
+ if (bch2_trans_inconsistent_on(a->data_type != data_type, trans,
+ "bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
+ bucket.inode, bucket.offset, a->gen,
+ bch2_data_type_str(a->data_type),
+ bch2_data_type_str(data_type),
+ (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (bch2_trans_inconsistent_on(parity &&
+ (a->dirty_sectors != -sectors ||
+ a->cached_sectors), trans,
+ "bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
+ bucket.inode, bucket.offset, a->gen,
+ a->dirty_sectors,
+ a->cached_sectors,
+ (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
+ ret = -EIO;
+ goto err;
+ }
+ }
+
+ if (sectors) {
+ ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
+ a->gen, a->data_type, &a->dirty_sectors);
+ if (ret)
+ goto err;
}
- a->v.dirty_sectors += sectors;
- if (data_type)
- a->v.data_type = !deleting ? data_type : 0;
+ if (!deleting) {
+ a->stripe = s.k->p.offset;
+ a->stripe_redundancy = s.v->nr_redundant;
+ } else {
+ a->stripe = 0;
+ a->stripe_redundancy = 0;
+ }
- ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
- if (ret)
- goto err;
+ alloc_data_type_set(a, data_type);
err:
- bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
return ret;
}
static int mark_stripe_bucket(struct btree_trans *trans,
- struct bkey_s_c k,
- unsigned ptr_idx,
- unsigned flags)
+ struct bkey_s_c_stripe s,
+ unsigned ptr_idx, bool deleting,
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
- const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
- unsigned nr_data = s->nr_blocks - s->nr_redundant;
- bool parity = ptr_idx >= nr_data;
- enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
- s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
- const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct bucket old, new, *g;
- struct printbuf buf = PRINTBUF;
+ const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
int ret = 0;
- BUG_ON(!(flags & BTREE_TRIGGER_GC));
+ struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
+ if (unlikely(!ca)) {
+ if (!(flags & BTREE_TRIGGER_overwrite))
+ ret = -EIO;
+ goto err;
+ }
- /* * XXX doesn't handle deletion */
+ struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
- percpu_down_read(&c->mark_lock);
- g = PTR_GC_BUCKET(ca, ptr);
+ if (flags & BTREE_TRIGGER_transactional) {
+ struct bkey_i_alloc_v4 *a =
+ bch2_trans_start_alloc_update(trans, bucket);
+ ret = PTR_ERR_OR_ZERO(a) ?:
+ __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
+ }
- if (g->dirty_sectors ||
- (g->stripe && g->stripe != k.k->p.offset)) {
- bch2_fs_inconsistent(c,
- "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
- ptr->dev, PTR_BUCKET_NR(ca, ptr), g->gen,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- ret = -EINVAL;
- goto err;
+ if (flags & BTREE_TRIGGER_gc) {
+ percpu_down_read(&c->mark_lock);
+ struct bucket *g = gc_bucket(ca, bucket.offset);
+ bucket_lock(g);
+ struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
+ ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
+ if (!ret) {
+ alloc_to_bucket(g, new);
+ bch2_dev_usage_update(c, ca, &old, &new, 0, true);
+ }
+ bucket_unlock(g);
+ percpu_up_read(&c->mark_lock);
}
+err:
+ bch2_dev_put(ca);
+ return ret;
+}
+
+static int mark_stripe_buckets(struct btree_trans *trans,
+ struct bkey_s_c old, struct bkey_s_c new,
+ enum btree_iter_update_trigger_flags flags)
+{
+ const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
+ ? bkey_s_c_to_stripe(old).v : NULL;
+ const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
+ ? bkey_s_c_to_stripe(new).v : NULL;
- bucket_lock(g);
- old = *g;
+ BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks);
- ret = bch2_check_bucket_ref(trans, k, ptr, sectors, data_type,
- g->gen, g->data_type,
- g->dirty_sectors);
- if (ret)
- goto err;
+ unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
- g->data_type = data_type;
- g->dirty_sectors += sectors;
+ for (unsigned i = 0; i < nr_blocks; i++) {
+ if (new_s && old_s &&
+ !memcmp(&new_s->ptrs[i],
+ &old_s->ptrs[i],
+ sizeof(new_s->ptrs[i])))
+ continue;
- g->stripe = k.k->p.offset;
- g->stripe_redundancy = s->nr_redundant;
- new = *g;
-err:
- bucket_unlock(g);
- if (!ret)
- bch2_dev_usage_update_m(c, ca, &old, &new);
- percpu_up_read(&c->mark_lock);
- printbuf_exit(&buf);
- return ret;
+ if (new_s) {
+ int ret = mark_stripe_bucket(trans,
+ bkey_s_c_to_stripe(new), i, false, flags);
+ if (ret)
+ return ret;
+ }
+
+ if (old_s) {
+ int ret = mark_stripe_bucket(trans,
+ bkey_s_c_to_stripe(old), i, true, flags);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
}
int bch2_trigger_stripe(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
+ enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s _new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bkey_s_c new = _new.s_c;
struct bch_fs *c = trans->c;
@@ -308,7 +355,10 @@ int bch2_trigger_stripe(struct btree_trans *trans,
const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
? bkey_s_c_to_stripe(new).v : NULL;
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ if (unlikely(flags & BTREE_TRIGGER_check_repair))
+ return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags);
+
+ if (flags & BTREE_TRIGGER_transactional) {
/*
* If the pointers aren't changing, we don't need to do anything:
*/
@@ -343,31 +393,12 @@ int bch2_trigger_stripe(struct btree_trans *trans,
return ret;
}
- unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
- for (unsigned i = 0; i < nr_blocks; i++) {
- if (new_s && old_s &&
- !memcmp(&new_s->ptrs[i],
- &old_s->ptrs[i],
- sizeof(new_s->ptrs[i])))
- continue;
-
- if (new_s) {
- int ret = bch2_trans_mark_stripe_bucket(trans,
- bkey_s_c_to_stripe(new), i, false);
- if (ret)
- return ret;
- }
-
- if (old_s) {
- int ret = bch2_trans_mark_stripe_bucket(trans,
- bkey_s_c_to_stripe(old), i, true);
- if (ret)
- return ret;
- }
- }
+ int ret = mark_stripe_buckets(trans, old, new, flags);
+ if (ret)
+ return ret;
}
- if (flags & BTREE_TRIGGER_ATOMIC) {
+ if (flags & BTREE_TRIGGER_atomic) {
struct stripe *m = genradix_ptr(&c->stripes, idx);
if (!m) {
@@ -406,7 +437,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
}
}
- if (flags & BTREE_TRIGGER_GC) {
+ if (flags & BTREE_TRIGGER_gc) {
struct gc_stripe *m =
genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
@@ -435,13 +466,11 @@ int bch2_trigger_stripe(struct btree_trans *trans,
*/
memset(m->block_sectors, 0, sizeof(m->block_sectors));
- for (unsigned i = 0; i < new_s->nr_blocks; i++) {
- int ret = mark_stripe_bucket(trans, new, i, flags);
- if (ret)
- return ret;
- }
+ int ret = mark_stripe_buckets(trans, old, new, flags);
+ if (ret)
+ return ret;
- int ret = bch2_update_replicas(c, new, &m->r.e,
+ ret = bch2_update_replicas(c, new, &m->r.e,
((s64) m->sectors * m->nr_redundant),
0, true);
if (ret) {
@@ -604,21 +633,21 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
struct bch_csum got = ec_block_checksum(buf, i, offset);
if (bch2_crc_cmp(want, got)) {
- struct printbuf err = PRINTBUF;
- struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);
-
- prt_printf(&err, "stripe checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)\n",
- want.hi, want.lo,
- got.hi, got.lo,
- bch2_csum_types[v->csum_type]);
- prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
- bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
- bch_err_ratelimited(ca, "%s", err.buf);
- printbuf_exit(&err);
+ struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
+ if (ca) {
+ struct printbuf err = PRINTBUF;
- clear_bit(i, buf->valid);
+ prt_str(&err, "stripe ");
+ bch2_csum_err_msg(&err, v->csum_type, want, got);
+ prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
+ bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
+ bch_err_ratelimited(ca, "%s", err.buf);
+ printbuf_exit(&err);
+
+ bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+ }
- bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+ clear_bit(i, buf->valid);
break;
}
@@ -685,7 +714,7 @@ static void ec_block_endio(struct bio *bio)
bch2_blk_status_to_str(bio->bi_status)))
clear_bit(ec_bio->idx, ec_bio->buf->valid);
- if (ptr_stale(ca, ptr)) {
+ if (dev_ptr_stale(ca, ptr)) {
bch_err_ratelimited(ca->fs,
"error %s stripe: stale pointer after io",
bio_data_dir(bio) == READ ? "reading from" : "writing to");
@@ -703,25 +732,26 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned offset = 0, bytes = buf->size << 9;
struct bch_extent_ptr *ptr = &v->ptrs[idx];
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
? BCH_DATA_user
: BCH_DATA_parity;
int rw = op_is_write(opf);
- if (ptr_stale(ca, ptr)) {
- bch_err_ratelimited(c,
- "error %s stripe: stale pointer",
- rw == READ ? "reading from" : "writing to");
+ struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
+ if (!ca) {
clear_bit(idx, buf->valid);
return;
}
- if (!bch2_dev_get_ioref(ca, rw)) {
+ if (dev_ptr_stale(ca, ptr)) {
+ bch_err_ratelimited(c,
+ "error %s stripe: stale pointer",
+ rw == READ ? "reading from" : "writing to");
clear_bit(idx, buf->valid);
return;
}
+
this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
while (offset < bytes) {
@@ -767,7 +797,7 @@ static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
int ret;
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
- POS(0, idx), BTREE_ITER_SLOTS);
+ POS(0, idx), BTREE_ITER_slots);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1058,7 +1088,7 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
int ret;
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1129,7 +1159,7 @@ static int ec_stripe_key_update(struct btree_trans *trans,
int ret;
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
- new->k.p, BTREE_ITER_INTENT);
+ new->k.p, BTREE_ITER_intent);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1171,6 +1201,7 @@ err:
}
static int ec_stripe_update_extent(struct btree_trans *trans,
+ struct bch_dev *ca,
struct bpos bucket, u8 gen,
struct ec_stripe_buf *s,
struct bpos *bp_pos)
@@ -1181,13 +1212,13 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
struct btree_iter iter;
struct bkey_s_c k;
const struct bch_extent_ptr *ptr_c;
- struct bch_extent_ptr *ptr, *ec_ptr = NULL;
+ struct bch_extent_ptr *ec_ptr = NULL;
struct bch_extent_stripe_ptr stripe_ptr;
struct bkey_i *n;
int ret, dev, block;
- ret = bch2_get_next_backpointer(trans, bucket, gen,
- bp_pos, &bp, BTREE_ITER_CACHED);
+ ret = bch2_get_next_backpointer(trans, ca, bucket, gen,
+ bp_pos, &bp, BTREE_ITER_cached);
if (ret)
return ret;
if (bpos_eq(*bp_pos, SPOS_MAX))
@@ -1212,7 +1243,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
return -EIO;
}
- k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
+ k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent);
ret = bkey_err(k);
if (ret)
return ret;
@@ -1270,17 +1301,21 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
{
struct bch_fs *c = trans->c;
struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
- struct bch_extent_ptr bucket = v->ptrs[block];
- struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
+ struct bch_extent_ptr ptr = v->ptrs[block];
struct bpos bp_pos = POS_MIN;
int ret = 0;
+ struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
+ if (!ca)
+ return -EIO;
+
+ struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);
+
while (1) {
ret = commit_do(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_check_rw|
BCH_TRANS_COMMIT_no_enospc,
- ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
- s, &bp_pos));
+ ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, &bp_pos));
if (ret)
break;
if (bkey_eq(bp_pos, POS_MAX))
@@ -1289,6 +1324,7 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
bp_pos = bpos_nosnap_successor(bp_pos);
}
+ bch2_dev_put(ca);
return ret;
}
@@ -1319,20 +1355,18 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
unsigned block,
struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
- unsigned offset = ca->mi.bucket_size - ob->sectors_free;
- int ret;
-
- if (!bch2_dev_get_ioref(ca, WRITE)) {
+ struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
+ if (!ca) {
s->err = -BCH_ERR_erofs_no_writes;
return;
}
+ unsigned offset = ca->mi.bucket_size - ob->sectors_free;
memset(s->new_stripe.data[block] + (offset << 9),
0,
ob->sectors_free << 9);
- ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
+ int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
ob->bucket * ca->mi.bucket_size + offset,
ob->sectors_free,
GFP_KERNEL, 0);
@@ -1517,16 +1551,13 @@ void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
{
struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
- struct bch_dev *ca;
- unsigned offset;
-
if (!ob)
return NULL;
BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
- ca = bch_dev_bkey_exists(c, ob->dev);
- offset = ca->mi.bucket_size - ob->sectors_free;
+ struct bch_dev *ca = ob_dev(c, ob);
+ unsigned offset = ca->mi.bucket_size - ob->sectors_free;
return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
}
@@ -1935,7 +1966,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
}
for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
+ BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
if (start_pos.offset) {
start_pos = min_pos;
@@ -2125,7 +2156,7 @@ int bch2_stripes_read(struct bch_fs *c)
{
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
- BTREE_ITER_PREFETCH, k, ({
+ BTREE_ITER_prefetch, k, ({
if (k.k->type != KEY_TYPE_stripe)
continue;
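
[annotation] bch2_stripe_to_text() is hardened here against corrupt or truncated values: the header fields are memcpy()'d into a zeroed local struct (copying at most bkey_val_bytes()), and each variable-length entry is bounds-checked against bkey_val_end() before being dereferenced, so printing an invalid key can no longer read past the value. The defensive shape, with hypothetical names throughout:

	struct demo_hdr h = {};

	/* copy only the bytes the value actually contains */
	memcpy(&h, val, min(val_bytes, sizeof(h)));

	const struct demo_entry *e = (const void *) ((const char *) val + sizeof(h));

	for (unsigned i = 0; i < h.nr_entries; i++, e++) {
		if ((const void *) (e + 1) > val_end)
			break;			/* entry truncated: stop */
		demo_print_entry(out, e);
	}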
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index f4369b02e805..84a23eeb6249 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -6,14 +6,15 @@
#include "buckets_types.h"
#include "extents_types.h"
-enum bkey_invalid_flags;
+enum bch_validate_flags;
int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_stripe ((struct bkey_ops) { \
.key_invalid = bch2_stripe_invalid, \
@@ -32,6 +33,8 @@ static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
unsigned dev, unsigned csum_idx)
{
+ EBUG_ON(s->csum_type >= BCH_CSUM_NR);
+
unsigned csum_bytes = bch_crc_bytes[s->csum_type];
return sizeof(struct bch_stripe) +
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index af25d8ec60f2..dbe35b80bc0b 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -175,6 +175,7 @@
x(EINVAL, block_size_too_small) \
x(EINVAL, bucket_size_too_small) \
x(EINVAL, device_size_too_small) \
+ x(EINVAL, device_size_too_big) \
x(EINVAL, device_not_a_member_of_filesystem) \
x(EINVAL, device_has_been_removed) \
x(EINVAL, device_splitbrain) \
@@ -252,7 +253,8 @@
x(BCH_ERR_nopromote, nopromote_in_flight) \
x(BCH_ERR_nopromote, nopromote_no_writes) \
x(BCH_ERR_nopromote, nopromote_enomem) \
- x(0, need_inode_lock)
+ x(0, need_inode_lock) \
+ x(0, invalid_snapshot_node)
enum bch_errcode {
BCH_ERR_START = 2048,
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 043431206799..c66eeffcd7f2 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "error.h"
-#include "recovery.h"
+#include "journal.h"
+#include "recovery_passes.h"
#include "super.h"
#include "thread_with_file.h"
@@ -16,7 +17,8 @@ bool bch2_inconsistent_error(struct bch_fs *c)
return false;
case BCH_ON_ERROR_ro:
if (bch2_fs_emergency_read_only(c))
- bch_err(c, "inconsistency detected - emergency read only");
+ bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
+ journal_cur_seq(&c->journal));
return true;
case BCH_ON_ERROR_panic:
panic(bch2_fmt(c, "panic after error"));
@@ -174,6 +176,21 @@ static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
return s;
}
+/* s/fix?/fixing/ s/recreate?/recreating/ */
+static void prt_actioning(struct printbuf *out, const char *action)
+{
+ unsigned len = strlen(action);
+
+ BUG_ON(action[len - 1] != '?');
+ --len;
+
+ if (action[len - 1] == 'e')
+ --len;
+
+ prt_bytes(out, action, len);
+ prt_str(out, "ing");
+}
+
int bch2_fsck_err(struct bch_fs *c,
enum bch_fsck_flags flags,
enum bch_sb_error_id err,
@@ -184,6 +201,7 @@ int bch2_fsck_err(struct bch_fs *c,
bool print = true, suppressing = false, inconsistent = false;
struct printbuf buf = PRINTBUF, *out = &buf;
int ret = -BCH_ERR_fsck_ignore;
+ const char *action_orig = "fix?", *action = action_orig;
if ((flags & FSCK_CAN_FIX) &&
test_bit(err, c->sb.errors_silent))
@@ -195,6 +213,19 @@ int bch2_fsck_err(struct bch_fs *c,
prt_vprintf(out, fmt, args);
va_end(args);
+ /* Custom fix/continue/recreate/etc.? */
+ if (out->buf[out->pos - 1] == '?') {
+ const char *p = strrchr(out->buf, ',');
+ if (p) {
+ out->pos = p - out->buf;
+ action = kstrdup(p + 2, GFP_KERNEL);
+ if (!action) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+ }
+
mutex_lock(&c->fsck_error_msgs_lock);
s = fsck_err_get(c, fmt);
if (s) {
@@ -206,12 +237,16 @@ int bch2_fsck_err(struct bch_fs *c,
if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
ret = s->ret;
mutex_unlock(&c->fsck_error_msgs_lock);
- printbuf_exit(&buf);
- return ret;
+ goto err;
}
kfree(s->last_msg);
s->last_msg = kstrdup(buf.buf, GFP_KERNEL);
+ if (!s->last_msg) {
+ mutex_unlock(&c->fsck_error_msgs_lock);
+ ret = -ENOMEM;
+ goto err;
+ }
if (c->opts.ratelimit_errors &&
!(flags & FSCK_NO_RATELIMIT) &&
@@ -237,7 +272,8 @@ int bch2_fsck_err(struct bch_fs *c,
inconsistent = true;
ret = -BCH_ERR_fsck_errors_not_fixed;
} else if (flags & FSCK_CAN_FIX) {
- prt_str(out, ", fixing");
+ prt_str(out, ", ");
+ prt_actioning(out, action);
ret = -BCH_ERR_fsck_fix;
} else {
prt_str(out, ", continuing");
@@ -252,16 +288,16 @@ int bch2_fsck_err(struct bch_fs *c,
: c->opts.fix_errors;
if (fix == FSCK_FIX_ask) {
- int ask;
+ prt_str(out, ", ");
+ prt_str(out, action);
- prt_str(out, ": fix?");
if (bch2_fs_stdio_redirect(c))
bch2_print(c, "%s", out->buf);
else
bch2_print_string_as_lines(KERN_ERR, out->buf);
print = false;
- ask = bch2_fsck_ask_yn(c);
+ int ask = bch2_fsck_ask_yn(c);
if (ask >= YN_ALLNO && s)
s->fix = ask == YN_ALLNO
@@ -274,10 +310,12 @@ int bch2_fsck_err(struct bch_fs *c,
} else if (fix == FSCK_FIX_yes ||
(c->opts.nochanges &&
!(flags & FSCK_CAN_IGNORE))) {
- prt_str(out, ", fixing");
+ prt_str(out, ", ");
+ prt_actioning(out, action);
ret = -BCH_ERR_fsck_fix;
} else {
- prt_str(out, ", not fixing");
+ prt_str(out, ", not ");
+ prt_actioning(out, action);
}
} else if (flags & FSCK_NEED_FSCK) {
prt_str(out, " (run fsck to correct)");
@@ -309,8 +347,6 @@ int bch2_fsck_err(struct bch_fs *c,
mutex_unlock(&c->fsck_error_msgs_lock);
- printbuf_exit(&buf);
-
if (inconsistent)
bch2_inconsistent_error(c);
@@ -320,7 +356,10 @@ int bch2_fsck_err(struct bch_fs *c,
set_bit(BCH_FS_errors_not_fixed, &c->flags);
set_bit(BCH_FS_error, &c->flags);
}
-
+err:
+ if (action != action_orig)
+ kfree(action);
+ printbuf_exit(&buf);
return ret;
}
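
[annotation] bch2_fsck_err() now supports a per-call action verb: when the format string ends in ", <action>?" (e.g. ", recreate?"), the action is split off the message and prt_actioning() rewrites it as "<action>ing" for the "fixing"-style log lines; plain messages keep the default "fix?"/"fixing". A usage sketch (the error id is illustrative):

	ret = bch2_fsck_err(c, FSCK_CAN_FIX, some_err_id,
			    "snapshot tree points to missing subvolume, recreate?");
	/* with fix_errors=yes this logs "..., recreating" and returns
	 * -BCH_ERR_fsck_fix; in ask mode the prompt ends in ", recreate?" */

The kstrdup()'d action and the printbuf are now released on all paths via the new err label, which also fixes the earlier early returns that skipped printbuf_exit().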
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index ae1d6674c512..36caedf72d89 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -32,6 +32,12 @@ bool bch2_inconsistent_error(struct bch_fs *);
int bch2_topology_error(struct bch_fs *);
+#define bch2_fs_topology_error(c, ...) \
+({ \
+ bch_err(c, "btree topology error: " __VA_ARGS__); \
+ bch2_topology_error(c); \
+})
+
#define bch2_fs_inconsistent(c, ...) \
({ \
bch_err(c, __VA_ARGS__); \
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index b9033bb4f11c..5f4fecb358da 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -72,7 +72,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
for_each_btree_key_norestart(trans, iter,
BTREE_ID_reflink, POS(0, idx + offset),
- BTREE_ITER_SLOTS, r_k, ret2) {
+ BTREE_ITER_slots, r_k, ret2) {
if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
break;
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 61395b113df9..469037929685 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -71,6 +71,12 @@ void bch2_mark_io_failure(struct bch_io_failures *failed,
}
}
+static inline u64 dev_latency(struct bch_fs *c, unsigned dev)
+{
+ struct bch_dev *ca = bch2_dev_rcu(c, dev);
+ return ca ? atomic64_read(&ca->cur_latency[READ]) : S64_MAX;
+}
+
/*
* returns true if p1 is better than p2:
*/
@@ -79,11 +85,8 @@ static inline bool ptr_better(struct bch_fs *c,
const struct extent_ptr_decoded p2)
{
if (likely(!p1.idx && !p2.idx)) {
- struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
- struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
-
- u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
- u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
+ u64 l1 = dev_latency(c, p1.ptr.dev);
+ u64 l2 = dev_latency(c, p2.ptr.dev);
/* Pick at random, biased in favor of the faster device: */
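
ptr_better() now reads latencies through the NULL-safe dev_latency() helper above and, per the comment, chooses between replicas at random with a bias toward the faster device. A userspace sketch of one plausible weighting; the exact bias function is outside this hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Return true if the replica with latency l1 should be read. */
static bool pick_dev1(uint64_t l1, uint64_t l2)
{
	if (!(l1 + l2))
		return rand() & 1;
	/* probability l2 / (l1 + l2): lower latency wins more often */
	return (uint64_t)rand() % (l1 + l2) >= l1;
}
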
@@ -109,21 +112,21 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
struct bch_dev_io_failures *f;
- struct bch_dev *ca;
int ret = 0;
if (k.k->type == KEY_TYPE_error)
return -EIO;
+ rcu_read_lock();
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
/*
* Unwritten extent: no need to actually read, treat it as a
* hole and return 0s:
*/
- if (p.ptr.unwritten)
- return 0;
-
- ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ if (p.ptr.unwritten) {
+ ret = 0;
+ break;
+ }
/*
* If there are any dirty pointers it's an error if we can't
@@ -132,7 +135,9 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
if (!ret && !p.ptr.cached)
ret = -EIO;
- if (p.ptr.cached && ptr_stale(ca, &p.ptr))
+ struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+
+ if (p.ptr.cached && (!ca || dev_ptr_stale(ca, &p.ptr)))
continue;
f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
@@ -141,12 +146,13 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
? f->idx
: f->idx + 1;
- if (!p.idx &&
- !bch2_dev_is_readable(ca))
+ if (!p.idx && !ca)
p.idx++;
- if (bch2_force_reconstruct_read &&
- !p.idx && p.has_ec)
+ if (!p.idx && p.has_ec && bch2_force_reconstruct_read)
+ p.idx++;
+
+ if (!p.idx && !bch2_dev_is_readable(ca))
p.idx++;
if (p.idx >= (unsigned) p.has_ec + 1)
@@ -158,6 +164,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
*pick = p;
ret = 1;
}
+ rcu_read_unlock();
return ret;
}
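
The conversions in this function set the pattern used by the rest of this commit: device pointers are resolved under RCU and may be NULL once a device has been removed, so every use has to tolerate that. A hypothetical helper showing the shape (dev_ptr_stale_safe is not in the tree):

static inline int dev_ptr_stale_safe(struct bch_fs *c,
				     const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
	/* a removed device reads as "not stale"; callers decide the policy */
	int ret = ca ? dev_ptr_stale_rcu(ca, ptr) : 0;
	rcu_read_unlock();
	return ret;
}
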
@@ -165,7 +172,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
/* KEY_TYPE_btree_ptr: */
int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
int ret = 0;
@@ -186,16 +193,26 @@ void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
}
int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
+ struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
int ret = 0;
- bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err,
- btree_ptr_v2_val_too_big,
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
+ c, err, btree_ptr_v2_val_too_big,
"value too big (%zu > %zu)",
bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
+ bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
+ c, err, btree_ptr_v2_min_key_bad,
+ "min_key > key");
+
+ if (flags & BCH_VALIDATE_write)
+ bkey_fsck_err_on(!bp.v->sectors_written,
+ c, err, btree_ptr_v2_written_0,
+ "sectors_written == 0");
+
ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
return ret;
@@ -242,7 +259,6 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
const union bch_extent_entry *en_r;
struct extent_ptr_decoded lp, rp;
bool use_right_ptr;
- struct bch_dev *ca;
en_l = l_ptrs.start;
en_r = r_ptrs.start;
@@ -273,8 +289,12 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
return false;
/* Extents may not straddle buckets: */
- ca = bch_dev_bkey_exists(c, lp.ptr.dev);
- if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
+ bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);
+ rcu_read_unlock();
+
+ if (!same_bucket)
return false;
if (lp.has_ec != rp.has_ec ||
@@ -380,7 +400,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
/* KEY_TYPE_reservation: */
int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
@@ -662,16 +682,16 @@ static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
+ struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
- return __extent_ptr_durability(ca, p);
+ return ca ? __extent_ptr_durability(ca, p) : 0;
}
unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
+ struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
- if (ca->mi.state == BCH_MEMBER_STATE_failed)
+ if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed)
return 0;
return __extent_ptr_durability(ca, p);
@@ -684,8 +704,10 @@ unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
struct extent_ptr_decoded p;
unsigned durability = 0;
+ rcu_read_lock();
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
durability += bch2_extent_ptr_durability(c, &p);
+ rcu_read_unlock();
return durability;
}
@@ -697,9 +719,11 @@ static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
struct extent_ptr_decoded p;
unsigned durability = 0;
+ rcu_read_lock();
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
durability += bch2_extent_ptr_durability(c, &p);
+ rcu_read_unlock();
return durability;
}
@@ -828,8 +852,6 @@ union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
- struct bch_extent_ptr *ptr;
-
bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}
@@ -855,14 +877,21 @@ const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ struct bch_dev *ca;
+ bool ret = false;
+ rcu_read_lock();
bkey_for_each_ptr(ptrs, ptr)
if (bch2_dev_in_target(c, ptr->dev, target) &&
+ (ca = bch2_dev_rcu(c, ptr->dev)) &&
(!ptr->cached ||
- !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
- return true;
+ !dev_ptr_stale_rcu(ca, ptr))) {
+ ret = true;
+ break;
+ }
+ rcu_read_unlock();
- return false;
+ return ret;
}
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
@@ -964,15 +993,44 @@ void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
*/
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
- struct bch_extent_ptr *ptr;
+ struct bch_dev *ca;
+ rcu_read_lock();
bch2_bkey_drop_ptrs(k, ptr,
ptr->cached &&
- ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
+ (ca = bch2_dev_rcu(c, ptr->dev)) &&
+ dev_ptr_stale_rcu(ca, ptr));
+ rcu_read_unlock();
return bkey_deleted(k.k);
}
+void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
+{
+ out->atomic++;
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ if (!ca) {
+ prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
+ (u64) ptr->offset, ptr->gen,
+ ptr->cached ? " cached" : "");
+ } else {
+ u32 offset;
+ u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
+
+ prt_printf(out, "ptr: %u:%llu:%u gen %u",
+ ptr->dev, b, offset, ptr->gen);
+ if (ptr->cached)
+ prt_str(out, " cached");
+ if (ptr->unwritten)
+ prt_str(out, " unwritten");
+ if (bucket_valid(ca, b) && dev_ptr_stale_rcu(ca, ptr))
+ prt_printf(out, " stale");
+ }
+ rcu_read_unlock();
+ --out->atomic;
+}
+
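
The new helper prints pointers as dev:bucket:offset by splitting the sector address with sector_to_bucket_and_offset(). A sketch of that split, assuming fixed-size per-device buckets as in ca->mi.bucket_size:

#include <stdint.h>

/* bucket_size is in sectors; returns the bucket, stores the offset within it */
static inline uint64_t sector_to_bucket_and_offset_demo(uint64_t bucket_size,
							uint64_t sector,
							uint32_t *offset)
{
	*offset = (uint32_t)(sector % bucket_size);
	return sector / bucket_size;
}

With 8-sector buckets, a pointer at sector 17 would print as "<dev>:2:1 gen <gen>".
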
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
@@ -988,42 +1046,22 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
prt_printf(out, " ");
switch (__extent_entry_type(entry)) {
- case BCH_EXTENT_ENTRY_ptr: {
- const struct bch_extent_ptr *ptr = entry_to_ptr(entry);
- struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
- ? bch_dev_bkey_exists(c, ptr->dev)
- : NULL;
-
- if (!ca) {
- prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
- (u64) ptr->offset, ptr->gen,
- ptr->cached ? " cached" : "");
- } else {
- u32 offset;
- u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
-
- prt_printf(out, "ptr: %u:%llu:%u gen %u",
- ptr->dev, b, offset, ptr->gen);
- if (ptr->cached)
- prt_str(out, " cached");
- if (ptr->unwritten)
- prt_str(out, " unwritten");
- if (ca && ptr_stale(ca, ptr))
- prt_printf(out, " stale");
- }
+ case BCH_EXTENT_ENTRY_ptr:
+ bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
break;
- }
+
case BCH_EXTENT_ENTRY_crc32:
case BCH_EXTENT_ENTRY_crc64:
case BCH_EXTENT_ENTRY_crc128: {
struct bch_extent_crc_unpacked crc =
bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
- prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress ",
+ prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
crc.compressed_size,
crc.uncompressed_size,
- crc.offset, crc.nonce,
- bch2_csum_types[crc.csum_type]);
+ crc.offset, crc.nonce);
+ bch2_prt_csum_type(out, crc.csum_type);
+ prt_str(out, " compress ");
bch2_prt_compression_type(out, crc.compression_type);
break;
}
@@ -1057,55 +1095,50 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
static int extent_ptr_invalid(struct bch_fs *c,
struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
const struct bch_extent_ptr *ptr,
unsigned size_ondisk,
bool metadata,
struct printbuf *err)
{
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
- u64 bucket;
- u32 bucket_offset;
- struct bch_dev *ca;
int ret = 0;
- if (!bch2_dev_exists2(c, ptr->dev)) {
- /*
- * If we're in the write path this key might have already been
- * overwritten, and we could be seeing a device that doesn't
- * exist anymore due to racing with device removal:
- */
- if (flags & BKEY_INVALID_WRITE)
- return 0;
-
- bkey_fsck_err(c, err, ptr_to_invalid_device,
- "pointer to invalid device (%u)", ptr->dev);
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ if (!ca) {
+ rcu_read_unlock();
+ return 0;
}
+ u32 bucket_offset;
+ u64 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
+ unsigned first_bucket = ca->mi.first_bucket;
+ u64 nbuckets = ca->mi.nbuckets;
+ unsigned bucket_size = ca->mi.bucket_size;
+ rcu_read_unlock();
- ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr(ptrs, ptr2)
bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
ptr_to_duplicate_device,
"multiple pointers to same device (%u)", ptr->dev);
- bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
- bkey_fsck_err_on(bucket >= ca->mi.nbuckets, c, err,
+ bkey_fsck_err_on(bucket >= nbuckets, c, err,
ptr_after_last_bucket,
- "pointer past last bucket (%llu > %llu)", bucket, ca->mi.nbuckets);
- bkey_fsck_err_on(ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket), c, err,
+ "pointer past last bucket (%llu > %llu)", bucket, nbuckets);
+ bkey_fsck_err_on(bucket < first_bucket, c, err,
ptr_before_first_bucket,
- "pointer before first bucket (%llu < %u)", bucket, ca->mi.first_bucket);
- bkey_fsck_err_on(bucket_offset + size_ondisk > ca->mi.bucket_size, c, err,
+ "pointer before first bucket (%llu < %u)", bucket, first_bucket);
+ bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size, c, err,
ptr_spans_multiple_buckets,
"pointer spans multiple buckets (%u + %u > %u)",
- bucket_offset, size_ondisk, ca->mi.bucket_size);
+ bucket_offset, size_ondisk, bucket_size);
fsck_err:
return ret;
}
int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
@@ -1181,7 +1214,7 @@ int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
bkey_fsck_err_on(crc_is_encoded(crc) &&
(crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
- (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT)), c, err,
+ (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), c, err,
ptr_crc_uncompressed_size_too_big,
"too large encoded extent");
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index fd2669cdd76f..1ade959652b2 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -8,7 +8,7 @@
struct bch_fs;
struct btree_trans;
-enum bkey_invalid_flags;
+enum bch_validate_flags;
/* extent entries: */
@@ -406,12 +406,12 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
/* KEY_TYPE_btree_ptr: */
int bch2_btree_ptr_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
int bch2_btree_ptr_v2_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
int, struct bkey_s);
@@ -448,7 +448,7 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
/* KEY_TYPE_reservation: */
int bch2_reservation_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
@@ -596,30 +596,6 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
return ret;
}
-static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
-{
- switch (k.k->type) {
- case KEY_TYPE_btree_ptr:
- case KEY_TYPE_btree_ptr_v2:
- return BCH_DATA_btree;
- case KEY_TYPE_extent:
- case KEY_TYPE_reflink_v:
- return BCH_DATA_user;
- case KEY_TYPE_stripe: {
- struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
-
- BUG_ON(ptr < s.v->ptrs ||
- ptr >= s.v->ptrs + s.v->nr_blocks);
-
- return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
- ? BCH_DATA_parity
- : BCH_DATA_user;
- }
- default:
- BUG();
- }
-}
-
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
@@ -678,7 +654,7 @@ union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
do { \
struct bkey_ptrs _ptrs = bch2_bkey_ptrs(_k); \
\
- _ptr = &_ptrs.start->ptr; \
+ struct bch_extent_ptr *_ptr = &_ptrs.start->ptr; \
\
while ((_ptr = bkey_ptr_next(_ptrs, _ptr))) { \
if (_cond) { \
@@ -700,10 +676,11 @@ bch2_extent_has_ptr(struct bkey_s_c, struct extent_ptr_decoded, struct bkey_s);
void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);
bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
+void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_ptr_swab(struct bkey_s);
diff --git a/fs/bcachefs/eytzinger.c b/fs/bcachefs/eytzinger.c
new file mode 100644
index 000000000000..2eaffe37b5e7
--- /dev/null
+++ b/fs/bcachefs/eytzinger.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "eytzinger.h"
+
+/**
+ * is_aligned - is this pointer & size okay for word-wide copying?
+ * @base: pointer to data
+ * @size: size of each element
+ * @align: required alignment (typically 4 or 8)
+ *
+ * Returns true if elements can be copied using word loads and stores.
+ * The size must be a multiple of the alignment, and the base address must
+ * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
+ *
+ * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
+ * to "if ((a | b) & mask)", so we do that by hand.
+ */
+__attribute_const__ __always_inline
+static bool is_aligned(const void *base, size_t size, unsigned char align)
+{
+ unsigned char lsbits = (unsigned char)size;
+
+ (void)base;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ lsbits |= (unsigned char)(uintptr_t)base;
+#endif
+ return (lsbits & (align - 1)) == 0;
+}
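
A quick demonstration of the folded check; this version always takes the strict path, whereas the function above skips the address test when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool is_aligned_demo(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size | (unsigned char)(uintptr_t)base;

	return (lsbits & (align - 1)) == 0;
}

int main(void)
{
	uint64_t buf[4];

	assert(is_aligned_demo(buf, 24, 8));	/* 64-bit word copies OK */
	assert(!is_aligned_demo(buf, 20, 8));	/* 20 % 8 != 0: no 64-bit path */
	assert(is_aligned_demo(buf, 20, 4));	/* but 32-bit chunks still work */
	return 0;
}
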
+
+/**
+ * swap_words_32 - swap two elements in 32-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 4)
+ *
+ * Exchange the two objects in memory. This exploits base+index addressing,
+ * which basically all CPUs have, to minimize loop overhead computations.
+ *
+ * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
+ * bottom of the loop, even though the zero flag is still valid from the
+ * subtract (since the intervening mov instructions don't alter the flags).
+ * Gcc 8.1.0 doesn't have that problem.
+ */
+static void swap_words_32(void *a, void *b, size_t n)
+{
+ do {
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+ } while (n);
+}
+
+/**
+ * swap_words_64 - swap two elements in 64-bit chunks
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size (must be a multiple of 8)
+ *
+ * Exchange the two objects in memory. This exploits base+index
+ * addressing, which basically all CPUs have, to minimize loop overhead
+ * computations.
+ *
+ * We'd like to use 64-bit loads if possible. If they're not, emulating
+ * one requires base+index+4 addressing which x86 has but most other
+ * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
+ * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
+ * x32 ABI). Are there any cases the kernel needs to worry about?
+ */
+static void swap_words_64(void *a, void *b, size_t n)
+{
+ do {
+#ifdef CONFIG_64BIT
+ u64 t = *(u64 *)(a + (n -= 8));
+ *(u64 *)(a + n) = *(u64 *)(b + n);
+ *(u64 *)(b + n) = t;
+#else
+ /* Use two 32-bit transfers to avoid base+index+4 addressing */
+ u32 t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+
+ t = *(u32 *)(a + (n -= 4));
+ *(u32 *)(a + n) = *(u32 *)(b + n);
+ *(u32 *)(b + n) = t;
+#endif
+ } while (n);
+}
+
+/**
+ * swap_bytes - swap two elements a byte at a time
+ * @a: pointer to the first element to swap
+ * @b: pointer to the second element to swap
+ * @n: element size
+ *
+ * This is the fallback if alignment doesn't allow using larger chunks.
+ */
+static void swap_bytes(void *a, void *b, size_t n)
+{
+ do {
+ char t = ((char *)a)[--n];
+ ((char *)a)[n] = ((char *)b)[n];
+ ((char *)b)[n] = t;
+ } while (n);
+}
+
+/*
+ * The values are arbitrary as long as they can't be confused with
+ * a pointer, but small integers make for the smallest compare
+ * instructions.
+ */
+#define SWAP_WORDS_64 (swap_r_func_t)0
+#define SWAP_WORDS_32 (swap_r_func_t)1
+#define SWAP_BYTES (swap_r_func_t)2
+#define SWAP_WRAPPER (swap_r_func_t)3
+
+struct wrapper {
+ cmp_func_t cmp;
+ swap_func_t swap_func;
+};
+
+/*
+ * The function pointer is last to make tail calls most efficient if the
+ * compiler decides not to inline this function.
+ */
+static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func, const void *priv)
+{
+ if (swap_func == SWAP_WRAPPER) {
+ ((const struct wrapper *)priv)->swap_func(a, b, (int)size);
+ return;
+ }
+
+ if (swap_func == SWAP_WORDS_64)
+ swap_words_64(a, b, size);
+ else if (swap_func == SWAP_WORDS_32)
+ swap_words_32(a, b, size);
+ else if (swap_func == SWAP_BYTES)
+ swap_bytes(a, b, size);
+ else
+ swap_func(a, b, (int)size, priv);
+}
+
+#define _CMP_WRAPPER ((cmp_r_func_t)0L)
+
+static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
+{
+ if (cmp == _CMP_WRAPPER)
+ return ((const struct wrapper *)priv)->cmp(a, b);
+ return cmp(a, b, priv);
+}
+
+static inline int eytzinger0_do_cmp(void *base, size_t n, size_t size,
+ cmp_r_func_t cmp_func, const void *priv,
+ size_t l, size_t r)
+{
+ return do_cmp(base + inorder_to_eytzinger0(l, n) * size,
+ base + inorder_to_eytzinger0(r, n) * size,
+ cmp_func, priv);
+}
+
+static inline void eytzinger0_do_swap(void *base, size_t n, size_t size,
+ swap_r_func_t swap_func, const void *priv,
+ size_t l, size_t r)
+{
+ do_swap(base + inorder_to_eytzinger0(l, n) * size,
+ base + inorder_to_eytzinger0(r, n) * size,
+ size, swap_func, priv);
+}
+
+void eytzinger0_sort_r(void *base, size_t n, size_t size,
+ cmp_r_func_t cmp_func,
+ swap_r_func_t swap_func,
+ const void *priv)
+{
+ int i, j, k;
+
+ /* called from 'sort' without swap function, let's pick the default */
+ if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
+ swap_func = NULL;
+
+ if (!swap_func) {
+ if (is_aligned(base, size, 8))
+ swap_func = SWAP_WORDS_64;
+ else if (is_aligned(base, size, 4))
+ swap_func = SWAP_WORDS_32;
+ else
+ swap_func = SWAP_BYTES;
+ }
+
+ /* heapify */
+ for (i = n / 2 - 1; i >= 0; --i) {
+ /* Find the sift-down path all the way to the leaves. */
+ for (j = i; k = j * 2 + 1, k + 1 < n;)
+ j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;
+
+ /* Special case for the last leaf with no sibling. */
+ if (j * 2 + 2 == n)
+ j = j * 2 + 1;
+
+ /* Backtrack to the correct location. */
+ while (j != i && eytzinger0_do_cmp(base, n, size, cmp_func, priv, i, j) >= 0)
+ j = (j - 1) / 2;
+
+ /* Shift the element into its correct place. */
+ for (k = j; j != i;) {
+ j = (j - 1) / 2;
+ eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
+ }
+ }
+
+ /* sort */
+ for (i = n - 1; i > 0; --i) {
+ eytzinger0_do_swap(base, n, size, swap_func, priv, 0, i);
+
+ /* Find the sift-down path all the way to the leaves. */
+ for (j = 0; k = j * 2 + 1, k + 1 < i;)
+ j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;
+
+ /* Special case for the last leaf with no sibling. */
+ if (j * 2 + 2 == i)
+ j = j * 2 + 1;
+
+ /* Backtrack to the correct location. */
+ while (j && eytzinger0_do_cmp(base, n, size, cmp_func, priv, 0, j) >= 0)
+ j = (j - 1) / 2;
+
+ /* Shift the element into its correct place. */
+ for (k = j; j;) {
+ j = (j - 1) / 2;
+ eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
+ }
+ }
+}
+
+void eytzinger0_sort(void *base, size_t n, size_t size,
+ cmp_func_t cmp_func,
+ swap_func_t swap_func)
+{
+ struct wrapper w = {
+ .cmp = cmp_func,
+ .swap_func = swap_func,
+ };
+
+ return eytzinger0_sort_r(base, n, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
+}
+
+#if 0
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/ktime.h>
+
+static u64 cmp_count;
+
+static int mycmp(const void *a, const void *b)
+{
+ u32 _a = *(u32 *)a;
+ u32 _b = *(u32 *)b;
+
+ cmp_count++;
+ if (_a < _b)
+ return -1;
+ else if (_a > _b)
+ return 1;
+ else
+ return 0;
+}
+
+static int test(void)
+{
+ size_t N, i;
+ ktime_t start, end;
+ s64 delta;
+ u32 *arr;
+
+ for (N = 10000; N <= 100000; N += 10000) {
+ arr = kmalloc_array(N, sizeof(u32), GFP_KERNEL);
+ cmp_count = 0;
+
+ for (i = 0; i < N; i++)
+ arr[i] = get_random_u32();
+
+ start = ktime_get();
+ eytzinger0_sort(arr, N, sizeof(u32), mycmp, NULL);
+ end = ktime_get();
+
+ delta = ktime_us_delta(end, start);
+ printk(KERN_INFO "time: %lld\n", delta);
+ printk(KERN_INFO "comparisons: %lld\n", cmp_count);
+
+ u32 prev = 0;
+
+ eytzinger0_for_each(i, N) {
+ if (prev > arr[i])
+ goto err;
+ prev = arr[i];
+ }
+
+ kfree(arr);
+ }
+ return 0;
+
+err:
+ kfree(arr);
+ return -1;
+}
+#endif
diff --git a/fs/bcachefs/eytzinger.h b/fs/bcachefs/eytzinger.h
index b04750dbf870..24840aee335c 100644
--- a/fs/bcachefs/eytzinger.h
+++ b/fs/bcachefs/eytzinger.h
@@ -5,23 +5,33 @@
#include <linux/bitops.h>
#include <linux/log2.h>
-#include "util.h"
+#ifdef EYTZINGER_DEBUG
+#define EYTZINGER_BUG_ON(cond) BUG_ON(cond)
+#else
+#define EYTZINGER_BUG_ON(cond)
+#endif
/*
* Traversal for trees in eytzinger layout - a full binary tree laid out in an
- * array
- */
-
-/*
- * One based indexing version:
+ * array.
+ *
+ * Consider using an eytzinger tree any time you would otherwise be doing binary
+ * search over an array. Binary search is a worst case scenario for branch
+ * prediction and prefetching, but in an eytzinger tree every node's children
+ * are adjacent in memory, thus we can prefetch children before knowing the
+ * result of the comparison, assuming multiple nodes fit on a cacheline.
*
- * With one based indexing each level of the tree starts at a power of two -
- * good for cacheline alignment:
+ * Two variants are provided, for one based indexing and zero based indexing.
+ *
+ * Zero based indexing is more convenient, but one based indexing has better
+ * alignment and thus better performance because each new level of the tree
+ * starts at a power of two, and thus if element 0 was cacheline aligned, each
+ * new level will be as well.
*/
static inline unsigned eytzinger1_child(unsigned i, unsigned child)
{
- EBUG_ON(child > 1);
+ EYTZINGER_BUG_ON(child > 1);
return (i << 1) + child;
}
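
To make the layout concrete, a generic illustration (not bcachefs's helpers): a one-based eytzinger array built by an in-order fill, searched with a shift-and-compare descent where node k's children are 2k and 2k + 1, and the well-known ffs trick recovering the answer from the descent path:

#include <stdio.h>

static int fill_pos;

/* In-order fill of a one-based eytzinger array from a sorted array. */
static void eytzinger1_build(const int *sorted, int *eytz, int n, int k)
{
	if (k <= n) {
		eytzinger1_build(sorted, eytz, n, 2 * k);
		eytz[k] = sorted[fill_pos++];
		eytzinger1_build(sorted, eytz, n, 2 * k + 1);
	}
}

/* First element >= x, or 0 if none: both children of k sit next to each
 * other in memory, so they can be prefetched before a[k] is compared. */
static int eytzinger1_lower_bound(const int *eytz, int n, int x)
{
	int k = 1;

	while (k <= n)
		k = 2 * k + (eytz[k] < x);
	return k >> __builtin_ffs(~k);	/* cancel the trailing right turns */
}

int main(void)
{
	int sorted[] = { 1, 3, 5, 7, 9, 11, 13 };
	int eytz[8];	/* index 0 unused: one-based layout */

	eytzinger1_build(sorted, eytz, 7, 1);
	/* eytz[1..7] is now { 7, 3, 11, 1, 5, 9, 13 } */
	printf("%d\n", eytz[eytzinger1_lower_bound(eytz, 7, 6)]);	/* prints 7 */
	return 0;
}
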
@@ -58,7 +68,7 @@ static inline unsigned eytzinger1_last(unsigned size)
static inline unsigned eytzinger1_next(unsigned i, unsigned size)
{
- EBUG_ON(i > size);
+ EYTZINGER_BUG_ON(i > size);
if (eytzinger1_right_child(i) <= size) {
i = eytzinger1_right_child(i);
@@ -74,7 +84,7 @@ static inline unsigned eytzinger1_next(unsigned i, unsigned size)
static inline unsigned eytzinger1_prev(unsigned i, unsigned size)
{
- EBUG_ON(i > size);
+ EYTZINGER_BUG_ON(i > size);
if (eytzinger1_left_child(i) <= size) {
i = eytzinger1_left_child(i) + 1;
@@ -101,7 +111,7 @@ static inline unsigned __eytzinger1_to_inorder(unsigned i, unsigned size,
unsigned shift = __fls(size) - b;
int s;
- EBUG_ON(!i || i > size);
+ EYTZINGER_BUG_ON(!i || i > size);
i ^= 1U << b;
i <<= 1;
@@ -126,7 +136,7 @@ static inline unsigned __inorder_to_eytzinger1(unsigned i, unsigned size,
unsigned shift;
int s;
- EBUG_ON(!i || i > size);
+ EYTZINGER_BUG_ON(!i || i > size);
/*
* sign bit trick:
@@ -164,7 +174,7 @@ static inline unsigned inorder_to_eytzinger1(unsigned i, unsigned size)
static inline unsigned eytzinger0_child(unsigned i, unsigned child)
{
- EBUG_ON(child > 1);
+ EYTZINGER_BUG_ON(child > 1);
return (i << 1) + 1 + child;
}
@@ -231,11 +241,9 @@ static inline unsigned inorder_to_eytzinger0(unsigned i, unsigned size)
(_i) != -1; \
(_i) = eytzinger0_next((_i), (_size)))
-typedef int (*eytzinger_cmp_fn)(const void *l, const void *r, size_t size);
-
/* return greatest node <= @search, or -1 if not found */
-static inline ssize_t eytzinger0_find_le(void *base, size_t nr, size_t size,
- eytzinger_cmp_fn cmp, const void *search)
+static inline int eytzinger0_find_le(void *base, size_t nr, size_t size,
+ cmp_func_t cmp, const void *search)
{
unsigned i, n = 0;
@@ -244,21 +252,38 @@ static inline ssize_t eytzinger0_find_le(void *base, size_t nr, size_t size,
do {
i = n;
- n = eytzinger0_child(i, cmp(search, base + i * size, size) >= 0);
+ n = eytzinger0_child(i, cmp(base + i * size, search) <= 0);
} while (n < nr);
if (n & 1) {
- /* @i was greater than @search, return previous node: */
-
- if (i == eytzinger0_first(nr))
- return -1;
-
+ /*
+ * @i was greater than @search, return previous node:
+ *
+	 * if @i was the leftmost/smallest element,
+	 * eytzinger0_prev(eytzinger0_first()) returns -1, as expected
+ */
return eytzinger0_prev(i, nr);
} else {
return i;
}
}
+static inline int eytzinger0_find_gt(void *base, size_t nr, size_t size,
+ cmp_func_t cmp, const void *search)
+{
+ ssize_t idx = eytzinger0_find_le(base, nr, size, cmp, search);
+
+ /*
+	 * if eytzinger0_find_le() returned -1 - no element was <= search - we
+	 * want to return the first element; the next/prev identities mean this
+	 * works as expected
+	 *
+	 * similarly, if find_le() returns the last element, we should return -1;
+	 * the same identities mean this all works out:
+ */
+ return eytzinger0_next(idx, nr);
+}
+
#define eytzinger0_find(base, nr, size, _cmp, search) \
({ \
void *_base = (base); \
@@ -269,13 +294,13 @@ static inline ssize_t eytzinger0_find_le(void *base, size_t nr, size_t size,
int _res; \
\
while (_i < _nr && \
- (_res = _cmp(_search, _base + _i * _size, _size))) \
+ (_res = _cmp(_search, _base + _i * _size))) \
_i = eytzinger0_child(_i, _res > 0); \
_i; \
})
-void eytzinger0_sort(void *, size_t, size_t,
- int (*cmp_func)(const void *, const void *, size_t),
- void (*swap_func)(void *, void *, size_t));
+void eytzinger0_sort_r(void *, size_t, size_t,
+ cmp_r_func_t, swap_r_func_t, const void *);
+void eytzinger0_sort(void *, size_t, size_t, cmp_func_t, swap_func_t);
#endif /* _EYTZINGER_H */
diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c
index 624e6f963240..508d029ac53d 100644
--- a/fs/bcachefs/fs-common.c
+++ b/fs/bcachefs/fs-common.c
@@ -42,7 +42,7 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;
- ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
+ ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
if (ret)
goto err;
@@ -70,7 +70,7 @@ int bch2_create_trans(struct btree_trans *trans,
struct bch_subvolume s;
ret = bch2_subvolume_get(trans, snapshot_src.subvol, true,
- BTREE_ITER_CACHED, &s);
+ BTREE_ITER_cached, &s);
if (ret)
goto err;
@@ -78,7 +78,7 @@ int bch2_create_trans(struct btree_trans *trans,
}
ret = bch2_inode_peek(trans, &inode_iter, new_inode, snapshot_src,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
if (ret)
goto err;
@@ -163,7 +163,7 @@ int bch2_create_trans(struct btree_trans *trans,
name,
dir_target,
&dir_offset,
- BCH_HASH_SET_MUST_CREATE);
+ STR_HASH_must_create);
if (ret)
goto err;
@@ -171,7 +171,7 @@ int bch2_create_trans(struct btree_trans *trans,
new_inode->bi_dir_offset = dir_offset;
}
- inode_iter.flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
+ inode_iter.flags &= ~BTREE_ITER_all_snapshots;
bch2_btree_iter_set_snapshot(&inode_iter, snapshot);
ret = bch2_btree_iter_traverse(&inode_iter) ?:
@@ -198,16 +198,16 @@ int bch2_link_trans(struct btree_trans *trans,
if (dir.subvol != inum.subvol)
return -EXDEV;
- ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
+ ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent);
if (ret)
- goto err;
+ return ret;
inode_u->bi_ctime = now;
ret = bch2_inode_nlink_inc(inode_u);
if (ret)
- return ret;
+ goto err;
- ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
+ ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
if (ret)
goto err;
@@ -223,7 +223,7 @@ int bch2_link_trans(struct btree_trans *trans,
ret = bch2_dirent_create(trans, dir, &dir_hash,
mode_to_type(inode_u->bi_mode),
name, inum.inum, &dir_offset,
- BCH_HASH_SET_MUST_CREATE);
+ STR_HASH_must_create);
if (ret)
goto err;
@@ -255,19 +255,19 @@ int bch2_unlink_trans(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
+ ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
if (ret)
goto err;
dir_hash = bch2_hash_info_init(c, dir_u);
ret = bch2_dirent_lookup_trans(trans, &dirent_iter, dir, &dir_hash,
- name, &inum, BTREE_ITER_INTENT);
+ name, &inum, BTREE_ITER_intent);
if (ret)
goto err;
ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
if (ret)
goto err;
@@ -322,7 +322,7 @@ int bch2_unlink_trans(struct btree_trans *trans,
ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash, &dirent_iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ BTREE_UPDATE_internal_snapshot_node) ?:
bch2_inode_write(trans, &dir_iter, dir_u) ?:
bch2_inode_write(trans, &inode_iter, inode_u);
err:
@@ -363,7 +363,7 @@ static int subvol_update_parent(struct btree_trans *trans, u32 subvol, u32 new_p
struct bkey_i_subvolume *s =
bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_subvolumes, POS(0, subvol),
- BTREE_ITER_CACHED, subvolume);
+ BTREE_ITER_cached, subvolume);
int ret = PTR_ERR_OR_ZERO(s);
if (ret)
return ret;
@@ -394,7 +394,7 @@ int bch2_rename_trans(struct btree_trans *trans,
int ret;
ret = bch2_inode_peek(trans, &src_dir_iter, src_dir_u, src_dir,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
if (ret)
goto err;
@@ -403,7 +403,7 @@ int bch2_rename_trans(struct btree_trans *trans,
if (dst_dir.inum != src_dir.inum ||
dst_dir.subvol != src_dir.subvol) {
ret = bch2_inode_peek(trans, &dst_dir_iter, dst_dir_u, dst_dir,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
if (ret)
goto err;
@@ -423,13 +423,13 @@ int bch2_rename_trans(struct btree_trans *trans,
goto err;
ret = bch2_inode_peek(trans, &src_inode_iter, src_inode_u, src_inum,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
if (ret)
goto err;
if (dst_inum.inum) {
ret = bch2_inode_peek(trans, &dst_inode_iter, dst_inode_u, dst_inum,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
if (ret)
goto err;
}
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 39292e7ef342..b0a33fabadf8 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -30,15 +30,8 @@ static void bch2_readpages_end_io(struct bio *bio)
{
struct folio_iter fi;
- bio_for_each_folio_all(fi, bio) {
- if (!bio->bi_status) {
- folio_mark_uptodate(fi.folio);
- } else {
- folio_clear_uptodate(fi.folio);
- folio_set_error(fi.folio);
- }
- folio_unlock(fi.folio);
- }
+ bio_for_each_folio_all(fi, bio)
+ folio_end_read(fi.folio, bio->bi_status == BLK_STS_OK);
bio_put(bio);
}
@@ -176,7 +169,7 @@ retry:
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
- BTREE_ITER_SLOTS);
+ BTREE_ITER_slots);
while (1) {
struct bkey_s_c k;
unsigned bytes, sectors, offset_into_extent;
@@ -408,7 +401,6 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
bio_for_each_folio_all(fi, bio) {
struct bch_folio *s;
- folio_set_error(fi.folio);
mapping_set_error(fi.folio->mapping, -EIO);
s = __bch2_folio(fi.folio);
diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c
index 33cb6da3a5ad..09d21aef879a 100644
--- a/fs/bcachefs/fs-io-direct.c
+++ b/fs/bcachefs/fs-io-direct.c
@@ -254,7 +254,7 @@ retry:
for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
- BTREE_ITER_SLOTS, k, err) {
+ BTREE_ITER_slots, k, err) {
if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
break;
@@ -387,6 +387,8 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio)
ret = dio->op.error ?: ((long) dio->written << 9);
bio_put(&dio->op.wbio.bio);
+ bch2_write_ref_put(dio->op.c, BCH_WRITE_REF_dio_write);
+
/* inode->i_dio_count is our ref on inode and thus bch_fs */
inode_dio_end(&inode->v);
@@ -536,7 +538,7 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
if (likely(!dio->iter.count) || dio->op.error)
break;
- bio_reset(bio, NULL, REQ_OP_WRITE);
+ bio_reset(bio, NULL, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
}
out:
return bch2_dio_write_done(dio);
@@ -590,22 +592,25 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
prefetch(&inode->ei_inode);
prefetch((void *) &inode->ei_inode + 64);
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_dio_write))
+ return -EROFS;
+
inode_lock(&inode->v);
ret = generic_write_checks(req, iter);
if (unlikely(ret <= 0))
- goto err;
+ goto err_put_write_ref;
ret = file_remove_privs(file);
if (unlikely(ret))
- goto err;
+ goto err_put_write_ref;
ret = file_update_time(file);
if (unlikely(ret))
- goto err;
+ goto err_put_write_ref;
if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
- goto err;
+ goto err_put_write_ref;
inode_dio_begin(&inode->v);
bch2_pagecache_block_get(inode);
@@ -618,7 +623,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
- REQ_OP_WRITE,
+ REQ_OP_WRITE | REQ_SYNC | REQ_IDLE,
GFP_KERNEL,
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);
@@ -645,7 +650,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
}
ret = bch2_dio_write_loop(dio);
-err:
+out:
if (locked)
inode_unlock(&inode->v);
return ret;
@@ -653,7 +658,9 @@ err_put_bio:
bch2_pagecache_block_put(inode);
bio_put(bio);
inode_dio_end(&inode->v);
- goto err;
+err_put_write_ref:
+ bch2_write_ref_put(c, BCH_WRITE_REF_dio_write);
+ goto out;
}
void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c
index d359aa9b33b8..872283e5bd1e 100644
--- a/fs/bcachefs/fs-io-pagecache.c
+++ b/fs/bcachefs/fs-io-pagecache.c
@@ -214,7 +214,7 @@ retry:
for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
- BTREE_ITER_SLOTS, k, ret) {
+ BTREE_ITER_slots, k, ret) {
unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = bkey_to_sector_state(k);
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 8c70123b6a0c..ef20b64033e0 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -174,18 +174,18 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
static int bch2_flush_inode(struct bch_fs *c,
struct bch_inode_info *inode)
{
- struct bch_inode_unpacked u;
- int ret;
-
if (c->opts.journal_flush_disabled)
return 0;
- ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
- if (ret)
- return ret;
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync))
+ return -EROFS;
- return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
- bch2_inode_flush_nocow_writes(c, inode);
+ struct bch_inode_unpacked u;
+ int ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u) ?:
+ bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+ bch2_inode_flush_nocow_writes(c, inode);
+ bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
+ return ret;
}
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@@ -202,7 +202,10 @@ int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
ret = bch2_flush_inode(c, inode);
out:
- return bch2_err_class(ret);
+ ret = bch2_err_class(ret);
+ if (ret == -EROFS)
+ ret = -EIO;
+ return ret;
}
/* truncate: */
@@ -594,7 +597,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
POS(inode->v.i_ino, start_sector),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ BTREE_ITER_slots|BTREE_ITER_intent);
while (!ret && bkey_lt(iter.pos, end_pos)) {
s64 i_sectors_delta = 0;
@@ -1009,7 +1012,7 @@ retry:
for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
SPOS(inode->v.i_ino, offset >> 9, snapshot),
- BTREE_ITER_SLOTS, k, ret) {
+ BTREE_ITER_slots, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
next_hole = bch2_seek_pagecache_hole(&inode->v,
offset, MAX_LFS_FILESIZE, 0, false);
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index 3dc8630ff9fe..205a323ffc6d 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -548,7 +548,7 @@ long bch2_compat_fs_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
/* These are just misnamed, they actually get/put from/to user an int */
switch (cmd) {
- case FS_IOC_GETFLAGS:
+ case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
break;
case FS_IOC32_SETFLAGS:
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 0ccee05f6887..fd851f10d11c 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -90,7 +90,7 @@ retry:
bch2_trans_begin(trans);
ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode),
- BTREE_ITER_INTENT) ?:
+ BTREE_ITER_intent) ?:
(set ? set(trans, inode, &inode_u, p) : 0) ?:
bch2_inode_write(trans, &iter, &inode_u) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
@@ -188,7 +188,8 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
BUG_ON(!old);
if (unlikely(old != inode)) {
- discard_new_inode(&inode->v);
+ __destroy_inode(&inode->v);
+ kmem_cache_free(bch2_inode_cache, inode);
inode = old;
} else {
mutex_lock(&c->vfs_inodes_lock);
@@ -212,21 +213,47 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
_ret; \
})
+static struct inode *bch2_alloc_inode(struct super_block *sb)
+{
+ BUG();
+}
+
+static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
+{
+ struct bch_inode_info *inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
+ if (!inode)
+ return NULL;
+
+ inode_init_once(&inode->v);
+ mutex_init(&inode->ei_update_lock);
+ two_state_lock_init(&inode->ei_pagecache_lock);
+ INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
+ mutex_init(&inode->ei_quota_lock);
+ inode->v.i_state = 0;
+
+ if (unlikely(inode_init_always(c->vfs_sb, &inode->v))) {
+ kmem_cache_free(bch2_inode_cache, inode);
+ return NULL;
+ }
+
+ return inode;
+}
+
/*
* Allocate a new inode, dropping/retaking btree locks if necessary:
*/
static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
{
- struct bch_fs *c = trans->c;
-
struct bch_inode_info *inode =
memalloc_flags_do(PF_MEMALLOC_NORECLAIM|PF_MEMALLOC_NOWARN,
- to_bch_ei(new_inode(c->vfs_sb)));
+ __bch2_new_inode(trans->c));
if (unlikely(!inode)) {
- int ret = drop_locks_do(trans, (inode = to_bch_ei(new_inode(c->vfs_sb))) ? 0 : -ENOMEM);
- if (ret && inode)
- discard_new_inode(&inode->v);
+ int ret = drop_locks_do(trans, (inode = __bch2_new_inode(trans->c)) ? 0 : -ENOMEM);
+ if (ret && inode) {
+ __destroy_inode(&inode->v);
+ kmem_cache_free(bch2_inode_cache, inode);
+ }
if (ret)
return ERR_PTR(ret);
}
@@ -287,7 +314,7 @@ __bch2_create(struct mnt_idmap *idmap,
if (ret)
return ERR_PTR(ret);
#endif
- inode = to_bch_ei(new_inode(c->vfs_sb));
+ inode = __bch2_new_inode(c);
if (unlikely(!inode)) {
inode = ERR_PTR(-ENOMEM);
goto err;
@@ -320,7 +347,7 @@ retry:
inum.inum = inode_u.bi_inum;
ret = bch2_subvolume_get(trans, inum.subvol, true,
- BTREE_ITER_WITH_UPDATES, &subvol) ?:
+ BTREE_ITER_with_updates, &subvol) ?:
bch2_trans_commit(trans, NULL, &journal_seq, 0);
if (unlikely(ret)) {
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
@@ -373,17 +400,14 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_iter dirent_iter = {};
subvol_inum inum = {};
+ struct printbuf buf = PRINTBUF;
- int ret = bch2_hash_lookup(trans, &dirent_iter, bch2_dirent_hash_desc,
- dir_hash_info, dir, name, 0);
+ struct bkey_s_c k = bch2_hash_lookup(trans, &dirent_iter, bch2_dirent_hash_desc,
+ dir_hash_info, dir, name, 0);
+ int ret = bkey_err(k);
if (ret)
return ERR_PTR(ret);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(&dirent_iter);
- ret = bkey_err(k);
- if (ret)
- goto err;
-
ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), &inum);
if (ret > 0)
ret = -ENOENT;
@@ -403,20 +427,31 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
ret = bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?:
bch2_inode_find_by_inum_nowarn_trans(trans, inum, &inode_u) ?:
PTR_ERR_OR_ZERO(inode = bch2_new_inode(trans));
- if (bch2_err_matches(ret, ENOENT)) {
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, k);
- bch_err(c, "%s points to missing inode", buf.buf);
- printbuf_exit(&buf);
- }
+ bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
+ c, "dirent to missing inode:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
if (ret)
goto err;
+ /* regular files may have hardlinks: */
+ if (bch2_fs_inconsistent_on(bch2_inode_should_have_bp(&inode_u) &&
+ !bkey_eq(k.k->p, POS(inode_u.bi_dir, inode_u.bi_dir_offset)),
+ c,
+ "dirent points to inode that does not point back:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, k),
+ prt_printf(&buf, "\n "),
+ bch2_inode_unpacked_to_text(&buf, &inode_u),
+ buf.buf))) {
+ ret = -ENOENT;
+ goto err;
+ }
+
bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol);
inode = bch2_inode_insert(c, inode);
out:
bch2_trans_iter_exit(trans, &dirent_iter);
+ printbuf_exit(&buf);
return inode;
err:
inode = ERR_PTR(ret);
@@ -784,7 +819,7 @@ retry:
acl = NULL;
ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
if (ret)
goto btree_err;
@@ -841,6 +876,9 @@ static int bch2_getattr(struct mnt_idmap *idmap,
stat->blksize = block_bytes(c);
stat->blocks = inode->v.i_blocks;
+ stat->subvol = inode->ei_subvol;
+ stat->result_mask |= STATX_SUBVOL;
+
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
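
STATX_SUBVOL is the new statx(2) mask bit for the subvolume ID reported here. A userspace sketch, assuming kernel and libc headers new enough to define the flag and struct statx's stx_subvol field:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

#ifndef STATX_SUBVOL
#define STATX_SUBVOL 0x00008000U	/* value from newer uapi headers */
#endif

int main(int argc, char **argv)
{
	struct statx stx;

	if (statx(AT_FDCWD, argc > 1 ? argv[1] : ".",
		  AT_STATX_SYNC_AS_STAT, STATX_SUBVOL, &stx))
		return 1;

	if (stx.stx_mask & STATX_SUBVOL)
		printf("subvol: %llu\n", (unsigned long long)stx.stx_subvol);
	return 0;
}
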
@@ -961,7 +999,6 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_buf cur, prev;
- struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
unsigned offset_into_extent, sectors;
bool have_extent = false;
u32 snapshot;
@@ -971,6 +1008,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (ret)
return ret;
+ struct bpos end = POS(ei->v.i_ino, (start + len) >> 9);
if (start + len < start)
return -EINVAL;
@@ -1037,6 +1075,10 @@ retry:
bch2_btree_iter_set_pos(&iter,
POS(iter.pos.inode, iter.pos.offset + sectors));
+
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ break;
}
start = iter.pos.offset;
bch2_trans_iter_exit(trans, &iter);
@@ -1484,34 +1526,9 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, subvol_inum inum,
mapping_set_large_folios(inode->v.i_mapping);
}
-static struct inode *bch2_alloc_inode(struct super_block *sb)
-{
- struct bch_inode_info *inode;
-
- inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
- if (!inode)
- return NULL;
-
- inode_init_once(&inode->v);
- mutex_init(&inode->ei_update_lock);
- two_state_lock_init(&inode->ei_pagecache_lock);
- INIT_LIST_HEAD(&inode->ei_vfs_inode_list);
- mutex_init(&inode->ei_quota_lock);
-
- return &inode->v;
-}
-
-static void bch2_i_callback(struct rcu_head *head)
+static void bch2_free_inode(struct inode *vinode)
{
- struct inode *vinode = container_of(head, struct inode, i_rcu);
- struct bch_inode_info *inode = to_bch_ei(vinode);
-
- kmem_cache_free(bch2_inode_cache, inode);
-}
-
-static void bch2_destroy_inode(struct inode *vinode)
-{
- call_rcu(&vinode->i_rcu, bch2_i_callback);
+ kmem_cache_free(bch2_inode_cache, to_bch_ei(vinode));
}
static int inode_update_times_fn(struct btree_trans *trans,
@@ -1819,7 +1836,7 @@ static int bch2_unfreeze(struct super_block *sb)
static const struct super_operations bch_super_operations = {
.alloc_inode = bch2_alloc_inode,
- .destroy_inode = bch2_destroy_inode,
+ .free_inode = bch2_free_inode,
.write_inode = bch2_vfs_write_inode,
.evict_inode = bch2_evict_inode,
.sync_fs = bch2_sync_fs,
@@ -1997,6 +2014,7 @@ out:
return dget(sb->s_root);
err_put_super:
+ __bch2_fs_stop(c);
deactivate_locked_super(sb);
return ERR_PTR(bch2_err_class(ret));
}
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 47d4eefaba7b..c8f57465131c 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -12,7 +12,7 @@
#include "fsck.h"
#include "inode.h"
#include "keylist.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "snapshot.h"
#include "super.h"
#include "xattr.h"
@@ -63,9 +63,7 @@ static int subvol_lookup(struct btree_trans *trans, u32 subvol,
u32 *snapshot, u64 *inum)
{
struct bch_subvolume s;
- int ret;
-
- ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
+ int ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
*snapshot = le32_to_cpu(s.snapshot);
*inum = le64_to_cpu(s.inode);
@@ -81,7 +79,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
POS(0, inode_nr),
- BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_all_snapshots);
k = bch2_btree_iter_peek(&iter);
ret = bkey_err(k);
if (ret)
@@ -129,13 +127,13 @@ static int lookup_dirent_in_snapshot(struct btree_trans *trans,
u64 *target, unsigned *type, u32 snapshot)
{
struct btree_iter iter;
- struct bkey_s_c_dirent d;
- int ret = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
- &hash_info, dir, name, 0, snapshot);
+ struct bkey_s_c k = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
+ &hash_info, dir, name, 0, snapshot);
+ int ret = bkey_err(k);
if (ret)
return ret;
- d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
+ struct bkey_s_c_dirent d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
*target = le64_to_cpu(d.v->d_inum);
*type = d.v->d_type;
bch2_trans_iter_exit(trans, &iter);
@@ -156,11 +154,12 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
dir_hash_info = bch2_hash_info_init(c, &dir_inode);
- bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);
- ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
- &dir_hash_info, &iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ ret = bch2_btree_iter_traverse(&iter) ?:
+ bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
+ &dir_hash_info, &iter,
+ BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter);
err:
bch_err_fn(c, ret);
@@ -169,7 +168,8 @@ err:
/* Get lost+found, create if it doesn't exist: */
static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
- struct bch_inode_unpacked *lostfound)
+ struct bch_inode_unpacked *lostfound,
+ u64 reattaching_inum)
{
struct bch_fs *c = trans->c;
struct qstr lostfound_str = QSTR("lost+found");
@@ -184,19 +184,36 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
return ret;
subvol_inum root_inum = { .subvol = le32_to_cpu(st.master_subvol) };
- u32 subvol_snapshot;
- ret = subvol_lookup(trans, le32_to_cpu(st.master_subvol),
- &subvol_snapshot, &root_inum.inum);
- bch_err_msg(c, ret, "looking up root subvol");
+ struct bch_subvolume subvol;
+ ret = bch2_subvolume_get(trans, le32_to_cpu(st.master_subvol),
+ false, 0, &subvol);
+ bch_err_msg(c, ret, "looking up root subvol %u for snapshot %u",
+ le32_to_cpu(st.master_subvol), snapshot);
if (ret)
return ret;
+ if (!subvol.inode) {
+ struct btree_iter iter;
+ struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_subvolumes, POS(0, le32_to_cpu(st.master_subvol)),
+ 0, subvolume);
+ ret = PTR_ERR_OR_ZERO(subvol);
+ if (ret)
+ return ret;
+
+ subvol->v.inode = cpu_to_le64(reattaching_inum);
+ bch2_trans_iter_exit(trans, &iter);
+ }
+
+ root_inum.inum = le64_to_cpu(subvol.inode);
+
struct bch_inode_unpacked root_inode;
struct bch_hash_info root_hash_info;
u32 root_inode_snapshot = snapshot;
ret = lookup_inode(trans, root_inum.inum, &root_inode, &root_inode_snapshot);
- bch_err_msg(c, ret, "looking up root inode");
+ bch_err_msg(c, ret, "looking up root inode %llu for subvol %u",
+ root_inum.inum, le32_to_cpu(st.master_subvol));
if (ret)
return ret;
@@ -257,9 +274,9 @@ create_lostfound:
&lostfound_str,
lostfound->bi_inum,
&lostfound->bi_dir_offset,
- BCH_HASH_SET_MUST_CREATE) ?:
+ STR_HASH_must_create) ?:
bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
err:
bch_err_msg(c, ret, "creating lost+found");
bch2_trans_iter_exit(trans, &lostfound_iter);
@@ -292,7 +309,7 @@ static int reattach_inode(struct btree_trans *trans,
snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
}
- ret = lookup_lostfound(trans, dirent_snapshot, &lostfound);
+ ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum);
if (ret)
return ret;
@@ -316,7 +333,7 @@ static int reattach_inode(struct btree_trans *trans,
&name,
inode->bi_subvol ?: inode->bi_inum,
&dir_offset,
- BCH_HASH_SET_MUST_CREATE);
+ STR_HASH_must_create);
if (ret)
return ret;
@@ -363,14 +380,115 @@ static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume
return ret;
}
-struct snapshots_seen_entry {
- u32 id;
- u32 equiv;
-};
+static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 subvolid, u64 inum)
+{
+ struct bch_fs *c = trans->c;
+
+ if (!bch2_snapshot_is_leaf(c, snapshotid)) {
+ bch_err(c, "need to reconstruct subvol, but have interior node snapshot");
+ return -BCH_ERR_fsck_repair_unimplemented;
+ }
+
+ /*
+ * If inum isn't set, that means we're being called from check_dirents,
+ * not check_inodes - the root of this subvolume doesn't exist or we
+ * would have found it there:
+ */
+ if (!inum) {
+ struct btree_iter inode_iter = {};
+ struct bch_inode_unpacked new_inode;
+ u64 cpu = raw_smp_processor_id();
+
+ bch2_inode_init_early(c, &new_inode);
+ bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL);
+
+ new_inode.bi_subvol = subvolid;
+
+ int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
+ bch2_btree_iter_traverse(&inode_iter) ?:
+ bch2_inode_write(trans, &inode_iter, &new_inode);
+ bch2_trans_iter_exit(trans, &inode_iter);
+ if (ret)
+ return ret;
+
+ inum = new_inode.bi_inum;
+ }
+
+ bch_info(c, "reconstructing subvol %u with root inode %llu", subvolid, inum);
+
+ struct bkey_i_subvolume *new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
+ int ret = PTR_ERR_OR_ZERO(new_subvol);
+ if (ret)
+ return ret;
+
+ bkey_subvolume_init(&new_subvol->k_i);
+ new_subvol->k.p.offset = subvolid;
+ new_subvol->v.snapshot = cpu_to_le32(snapshotid);
+ new_subvol->v.inode = cpu_to_le64(inum);
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0);
+ if (ret)
+ return ret;
+
+ struct btree_iter iter;
+ struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_snapshots, POS(0, snapshotid),
+ 0, snapshot);
+ ret = PTR_ERR_OR_ZERO(s);
+ bch_err_msg(c, ret, "getting snapshot %u", snapshotid);
+ if (ret)
+ return ret;
+
+ u32 snapshot_tree = le32_to_cpu(s->v.tree);
+
+ s->v.subvol = cpu_to_le32(subvolid);
+ SET_BCH_SNAPSHOT_SUBVOL(&s->v, true);
+ bch2_trans_iter_exit(trans, &iter);
+
+ struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter,
+ BTREE_ID_snapshot_trees, POS(0, snapshot_tree),
+ 0, snapshot_tree);
+ ret = PTR_ERR_OR_ZERO(st);
+ bch_err_msg(c, ret, "getting snapshot tree %u", snapshot_tree);
+ if (ret)
+ return ret;
+
+ if (!st->v.master_subvol)
+ st->v.master_subvol = cpu_to_le32(subvolid);
+
+ bch2_trans_iter_exit(trans, &iter);
+ return 0;
+}
+
+static int reconstruct_inode(struct btree_trans *trans, u32 snapshot, u64 inum, u64 size, unsigned mode)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked new_inode;
+
+ bch2_inode_init_early(c, &new_inode);
+ bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, mode|0755, 0, NULL);
+ new_inode.bi_size = size;
+ new_inode.bi_inum = inum;
+
+ return __bch2_fsck_write_inode(trans, &new_inode, snapshot);
+}
+
+static int reconstruct_reg_inode(struct btree_trans *trans, u32 snapshot, u64 inum)
+{
+ struct btree_iter iter = {};
+
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
+ struct bkey_s_c k = bch2_btree_iter_peek_prev(&iter);
+ bch2_trans_iter_exit(trans, &iter);
+ int ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ return reconstruct_inode(trans, snapshot, inum, k.k->p.offset << 9, S_IFREG);
+}
struct snapshots_seen {
struct bpos pos;
- DARRAY(struct snapshots_seen_entry) ids;
+ snapshot_id_list ids;
};
static inline void snapshots_seen_exit(struct snapshots_seen *s)
@@ -385,20 +503,15 @@ static inline void snapshots_seen_init(struct snapshots_seen *s)
static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
{
- struct snapshots_seen_entry *i, n = {
- .id = id,
- .equiv = bch2_snapshot_equiv(c, id),
- };
- int ret = 0;
-
+ u32 *i;
__darray_for_each(s->ids, i) {
- if (i->id == id)
+ if (*i == id)
return 0;
- if (i->id > id)
+ if (*i > id)
break;
}
- ret = darray_insert_item(&s->ids, i - s->ids.data, n);
+ int ret = darray_insert_item(&s->ids, i - s->ids.data, id);
if (ret)
bch_err(c, "error reallocating snapshots_seen table (size %zu)",
s->ids.size);
@@ -408,42 +521,11 @@ static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s
static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
enum btree_id btree_id, struct bpos pos)
{
- struct snapshots_seen_entry n = {
- .id = pos.snapshot,
- .equiv = bch2_snapshot_equiv(c, pos.snapshot),
- };
- int ret = 0;
-
if (!bkey_eq(s->pos, pos))
s->ids.nr = 0;
-
s->pos = pos;
- s->pos.snapshot = n.equiv;
-
- darray_for_each(s->ids, i) {
- if (i->id == n.id)
- return 0;
- /*
- * We currently don't rigorously track for snapshot cleanup
- * needing to be run, so it shouldn't be a fsck error yet:
- */
- if (i->equiv == n.equiv) {
- bch_err(c, "snapshot deletion did not finish:\n"
- " duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
- bch2_btree_id_str(btree_id),
- pos.inode, pos.offset,
- i->id, n.id, n.equiv);
- set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
- return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
- }
- }
-
- ret = darray_push(&s->ids, n);
- if (ret)
- bch_err(c, "error reallocating snapshots_seen table (size %zu)",
- s->ids.size);
- return ret;
+ return snapshot_list_add_nodup(c, &s->ids, pos.snapshot);
}
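The rewritten helpers keep a plain list of u32 snapshot IDs: snapshots_seen_add_inorder() inserts in sorted order with no duplicates, while snapshots_seen_update() defers to snapshot_list_add_nodup(). A self-contained sketch of the sorted, no-duplicate insert (u32_list and its helper are illustrative, not the bcachefs darray API):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct u32_list { uint32_t *data; size_t nr, size; };

static int u32_list_add_inorder(struct u32_list *l, uint32_t id)
{
	size_t i;

	/* Find the insert position; bail if the id is already present: */
	for (i = 0; i < l->nr; i++) {
		if (l->data[i] == id)
			return 0;
		if (l->data[i] > id)
			break;
	}

	if (l->nr == l->size) {
		size_t new_size = l->size ? l->size * 2 : 8;
		uint32_t *d = realloc(l->data, new_size * sizeof(*d));

		if (!d)
			return -ENOMEM;
		l->data = d;
		l->size = new_size;
	}

	memmove(&l->data[i + 1], &l->data[i], (l->nr - i) * sizeof(*l->data));
	l->data[i] = id;
	l->nr++;
	return 0;
}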
/**
@@ -463,12 +545,10 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
ssize_t i;
EBUG_ON(id > ancestor);
- EBUG_ON(!bch2_snapshot_is_equiv(c, id));
- EBUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
/* @ancestor should be the snapshot most recently added to @seen */
EBUG_ON(ancestor != seen->pos.snapshot);
- EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv);
+ EBUG_ON(ancestor != darray_last(seen->ids));
if (id == ancestor)
return true;
@@ -487,9 +567,9 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
*/
for (i = seen->ids.nr - 2;
- i >= 0 && seen->ids.data[i].equiv >= id;
+ i >= 0 && seen->ids.data[i] >= id;
--i)
- if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv))
+ if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i]))
return false;
return true;
@@ -520,9 +600,6 @@ static int ref_visible2(struct bch_fs *c,
u32 src, struct snapshots_seen *src_seen,
u32 dst, struct snapshots_seen *dst_seen)
{
- src = bch2_snapshot_equiv(c, src);
- dst = bch2_snapshot_equiv(c, dst);
-
if (dst > src) {
swap(dst, src);
swap(dst_seen, src_seen);
@@ -569,7 +646,7 @@ static int add_inode(struct bch_fs *c, struct inode_walker *w,
return darray_push(&w->inodes, ((struct inode_walker_entry) {
.inode = u,
- .snapshot = bch2_snapshot_equiv(c, inode.k->p.snapshot),
+ .snapshot = inode.k->p.snapshot,
}));
}
@@ -585,7 +662,7 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
w->inodes.nr = 0;
for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
- BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ BTREE_ITER_all_snapshots, k, ret) {
if (k.k->p.offset != inum)
break;
@@ -605,21 +682,20 @@ static struct inode_walker_entry *
lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c k)
{
bool is_whiteout = k.k->type == KEY_TYPE_whiteout;
- u32 snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
struct inode_walker_entry *i;
__darray_for_each(w->inodes, i)
- if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
+ if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->snapshot))
goto found;
return NULL;
found:
- BUG_ON(snapshot > i->snapshot);
+ BUG_ON(k.k->p.snapshot > i->snapshot);
- if (snapshot != i->snapshot && !is_whiteout) {
+ if (k.k->p.snapshot != i->snapshot && !is_whiteout) {
struct inode_walker_entry new = *i;
- new.snapshot = snapshot;
+ new.snapshot = k.k->p.snapshot;
new.count = 0;
struct printbuf buf = PRINTBUF;
@@ -628,10 +704,10 @@ found:
bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u\n"
"unexpected because we should always update the inode when we update a key in that inode\n"
"%s",
- w->last_pos.inode, snapshot, i->snapshot, buf.buf);
+ w->last_pos.inode, k.k->p.snapshot, i->snapshot, buf.buf);
printbuf_exit(&buf);
- while (i > w->inodes.data && i[-1].snapshot > snapshot)
+ while (i > w->inodes.data && i[-1].snapshot > k.k->p.snapshot)
--i;
size_t pos = i - w->inodes.data;
@@ -663,10 +739,10 @@ static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
return lookup_inode_for_snapshot(trans->c, w, k);
}
-static int __get_visible_inodes(struct btree_trans *trans,
- struct inode_walker *w,
- struct snapshots_seen *s,
- u64 inum)
+static int get_visible_inodes(struct btree_trans *trans,
+ struct inode_walker *w,
+ struct snapshots_seen *s,
+ u64 inum)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
@@ -676,19 +752,17 @@ static int __get_visible_inodes(struct btree_trans *trans,
w->inodes.nr = 0;
for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
- BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
- u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
-
+ BTREE_ITER_all_snapshots, k, ret) {
if (k.k->p.offset != inum)
break;
- if (!ref_visible(c, s, s->pos.snapshot, equiv))
+ if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot))
continue;
if (bkey_is_inode(k.k))
add_inode(c, w, k);
- if (equiv >= s->pos.snapshot)
+ if (k.k->p.snapshot >= s->pos.snapshot)
break;
}
bch2_trans_iter_exit(trans, &iter);
@@ -709,7 +783,7 @@ static int check_key_has_snapshot(struct btree_trans *trans,
"key in missing snapshot: %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
+ BTREE_UPDATE_internal_snapshot_node) ?: 1;
fsck_err:
printbuf_exit(&buf);
return ret;
@@ -738,8 +812,8 @@ static int hash_redo_key(struct btree_trans *trans,
bch2_hash_set_in_snapshot(trans, desc, hash_info,
(subvol_inum) { 0, k.k->p.inode },
k.k->p.snapshot, tmp,
- BCH_HASH_SET_MUST_CREATE,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ STR_HASH_must_create|
+ BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}
@@ -768,7 +842,7 @@ static int hash_check_key(struct btree_trans *trans,
for_each_btree_key_norestart(trans, iter, desc.btree_id,
SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
- BTREE_ITER_SLOTS, k, ret) {
+ BTREE_ITER_slots, k, ret) {
if (bkey_eq(k.k->p, hash_k.k->p))
break;
@@ -1064,6 +1138,11 @@ static int check_inode(struct btree_trans *trans,
if (ret && !bch2_err_matches(ret, ENOENT))
goto err;
+ if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
+ ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum);
+ goto do_update;
+ }
+
if (fsck_err_on(ret,
c, inode_bi_subvol_missing,
"inode %llu:%u bi_subvol points to missing subvolume %u",
@@ -1081,7 +1160,7 @@ static int check_inode(struct btree_trans *trans,
do_update = true;
}
}
-
+do_update:
if (do_update) {
ret = __bch2_fsck_write_inode(trans, &u, iter->pos.snapshot);
bch_err_msg(c, ret, "in fsck updating inode");
@@ -1105,7 +1184,7 @@ int bch2_check_inodes(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_inode(trans, &iter, k, &prev, &s, full)));
@@ -1130,8 +1209,8 @@ static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_wal
i->count = count2;
if (i->count != count2) {
- bch_err(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
- w->last_pos.inode, i->snapshot, i->count, count2);
+ bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
+ w->last_pos.inode, i->snapshot, i->count, count2);
return -BCH_ERR_internal_fsck_err;
}
@@ -1234,8 +1313,8 @@ static int overlapping_extents_found(struct btree_trans *trans,
BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
bch2_trans_iter_init(trans, &iter1, btree, pos1,
- BTREE_ITER_ALL_SNAPSHOTS|
- BTREE_ITER_NOT_EXTENTS);
+ BTREE_ITER_all_snapshots|
+ BTREE_ITER_not_extents);
k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
ret = bkey_err(k1);
if (ret)
@@ -1297,7 +1376,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);
ret = bch2_trans_update_extent_overwrite(trans, old_iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
+ BTREE_UPDATE_internal_snapshot_node,
k1, k2) ?:
bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
bch2_disk_reservation_put(c, &res);
@@ -1338,7 +1417,6 @@ static int check_overlapping_extents(struct btree_trans *trans,
struct snapshots_seen *seen,
struct extent_ends *extent_ends,
struct bkey_s_c k,
- u32 equiv,
struct btree_iter *iter,
bool *fixed)
{
@@ -1371,10 +1449,6 @@ static int check_overlapping_extents(struct btree_trans *trans,
goto err;
}
- ret = extent_ends_at(c, extent_ends, seen, k);
- if (ret)
- goto err;
-
extent_ends->last_pos = k.k->p;
err:
return ret;
@@ -1411,11 +1485,8 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
struct bch_fs *c = trans->c;
struct inode_walker_entry *i;
struct printbuf buf = PRINTBUF;
- struct bpos equiv = k.k->p;
int ret = 0;
- equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
-
ret = check_key_has_snapshot(trans, iter, k);
if (ret) {
ret = ret < 0 ? ret : 0;
@@ -1438,6 +1509,17 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
if (k.k->type != KEY_TYPE_whiteout) {
+ if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) {
+ ret = reconstruct_reg_inode(trans, k.k->p.snapshot, k.k->p.inode) ?:
+ bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+ if (ret)
+ goto err;
+
+ /* force the inode walker to refetch after the restart: */
+ inode->last_pos.inode--;
+ ret = -BCH_ERR_transaction_restart_nested;
+ goto err;
+ }
+
if (fsck_err_on(!i, c, extent_in_missing_inode,
"extent in missing inode:\n %s",
(printbuf_reset(&buf),
@@ -1454,8 +1536,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
goto delete;
- ret = check_overlapping_extents(trans, s, extent_ends, k,
- equiv.snapshot, iter,
+ ret = check_overlapping_extents(trans, s, extent_ends, k, iter,
&inode->recalculate_sums);
if (ret)
goto err;
@@ -1472,8 +1553,8 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
for (;
inode->inodes.data && i >= inode->inodes.data;
--i) {
- if (i->snapshot > equiv.snapshot ||
- !key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
+ if (i->snapshot > k.k->p.snapshot ||
+ !key_visible_in_snapshot(c, s, i->snapshot, k.k->p.snapshot))
continue;
if (k.k->type != KEY_TYPE_whiteout) {
@@ -1490,7 +1571,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
ret = bch2_btree_iter_traverse(&iter2) ?:
bch2_btree_delete_at(trans, &iter2,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter2);
if (ret)
goto err;
@@ -1504,6 +1585,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
i->seen_this_pos = true;
}
+
+ if (k.k->type != KEY_TYPE_whiteout) {
+ ret = extent_ends_at(c, extent_ends, s, k);
+ if (ret)
+ goto err;
+ }
out:
err:
fsck_err:
@@ -1511,7 +1598,7 @@ fsck_err:
bch_err_fn(c, ret);
return ret;
delete:
- ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node);
goto out;
}
@@ -1532,7 +1619,7 @@ int bch2_check_extents(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
&res, NULL,
BCH_TRANS_COMMIT_no_enospc, ({
bch2_disk_reservation_put(c, &res);
@@ -1557,7 +1644,7 @@ int bch2_check_indirect_extents(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
&res, NULL,
BCH_TRANS_COMMIT_no_enospc, ({
bch2_disk_reservation_put(c, &res);
@@ -1584,8 +1671,8 @@ static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_
return count2;
if (i->count != count2) {
- bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
- i->count, count2);
+ bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu",
+ w->last_pos.inode, i->snapshot, i->count, count2);
i->count = count2;
if (i->inode.bi_nlink == i->count)
continue;
@@ -1626,6 +1713,15 @@ static int check_dirent_inode_dirent(struct btree_trans *trans,
if (inode_points_to_dirent(target, d))
return 0;
+ if (bch2_inode_should_have_bp(target) &&
+ !fsck_err(c, inode_wrong_backpointer,
+ "dirent points to inode that does not point back:\n %s",
+ (bch2_bkey_val_to_text(&buf, c, d.s_c),
+ prt_printf(&buf, "\n "),
+ bch2_inode_unpacked_to_text(&buf, target),
+ buf.buf)))
+ goto out_noiter;
+
if (!target->bi_dir &&
!target->bi_dir_offset) {
target->bi_dir = d.k->p.inode;
@@ -1694,6 +1790,7 @@ out:
err:
fsck_err:
bch2_trans_iter_exit(trans, &bp_iter);
+out_noiter:
printbuf_exit(&buf);
bch_err_fn(c, ret);
return ret;
@@ -1782,6 +1879,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol);
u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
u32 parent_snapshot;
+ u32 new_parent_subvol = 0;
u64 parent_inum;
struct printbuf buf = PRINTBUF;
int ret = 0;
@@ -1790,6 +1888,27 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
+ if (ret ||
+ !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot)) {
+ int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
+ if (ret2 && !bch2_err_matches(ret2, ENOENT))
+ return ret2;
+ }
+
+ if (ret &&
+ !new_parent_subvol &&
+ (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
+ /*
+ * Couldn't find a subvol for dirent's snapshot - but we lost
+ * subvols, so we need to reconstruct:
+ */
+ ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0);
+ if (ret)
+ return ret;
+
+ parent_snapshot = d.k->p.snapshot;
+ }
+
if (fsck_err_on(ret, c, dirent_to_missing_parent_subvol,
"dirent parent_subvol points to missing subvolume\n%s",
(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) ||
@@ -1798,10 +1917,10 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
"dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s",
parent_snapshot,
(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
- u32 new_parent_subvol;
- ret = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
- if (ret)
- goto err;
+ if (!new_parent_subvol) {
+ bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot);
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ goto err;
+ }
struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent);
ret = PTR_ERR_OR_ZERO(new_dirent);
@@ -1847,9 +1966,16 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
ret = lookup_inode(trans, target_inum, &subvol_root, &target_snapshot);
if (ret && !bch2_err_matches(ret, ENOENT))
- return ret;
+ goto err;
+
+ if (ret) {
+ bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum);
+ ret = 0;
+ goto err;
+ }
- if (fsck_err_on(parent_subvol != subvol_root.bi_parent_subvol,
+ if (fsck_err_on(!ret && parent_subvol != subvol_root.bi_parent_subvol,
c, inode_bi_parent_wrong,
"subvol root %llu has wrong bi_parent_subvol: got %u, should be %u",
target_inum,
@@ -1857,13 +1983,13 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
subvol_root.bi_parent_subvol = parent_subvol;
ret = __bch2_fsck_write_inode(trans, &subvol_root, target_snapshot);
if (ret)
- return ret;
+ goto err;
}
ret = check_dirent_target(trans, iter, d, &subvol_root,
target_snapshot);
if (ret)
- return ret;
+ goto err;
out:
err:
fsck_err:
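The conversions above from early 'return ret' to 'goto err' exist so the shared exit path always runs bch2_trans_iter_exit() and printbuf_exit(). The general shape of the idiom, as a compilable sketch with stub helpers (acquire/step1/step2/release are hypothetical):

#include <errno.h>
#include <stdlib.h>

struct resource { int dummy; };

static struct resource *acquire(void)   { return malloc(sizeof(struct resource)); }
static void release(struct resource *r) { free(r); }
static int step1(struct resource *r)    { (void)r; return 0; }
static int step2(struct resource *r)    { (void)r; return 0; }

/* Every early exit funnels through 'err' so release() always runs: */
static int do_thing(void)
{
	struct resource *r = acquire();
	int ret;

	if (!r)
		return -ENOMEM;

	ret = step1(r);
	if (ret)
		goto err;

	ret = step2(r);
err:
	release(r);
	return ret;
}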
@@ -1880,10 +2006,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
struct snapshots_seen *s)
{
struct bch_fs *c = trans->c;
- struct bkey_s_c_dirent d;
struct inode_walker_entry *i;
struct printbuf buf = PRINTBUF;
- struct bpos equiv;
int ret = 0;
ret = check_key_has_snapshot(trans, iter, k);
@@ -1892,9 +2016,6 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
goto out;
}
- equiv = k.k->p;
- equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
-
ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
if (ret)
goto err;
@@ -1919,12 +2040,23 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
*hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
dir->first_this_inode = false;
+ if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) {
+ ret = reconstruct_inode(trans, k.k->p.snapshot, k.k->p.inode, 0, S_IFDIR) ?:
+ bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+ if (ret)
+ goto err;
+
+ /* force the inode walker to refetch after the restart: */
+ dir->last_pos.inode--;
+ ret = -BCH_ERR_transaction_restart_nested;
+ goto err;
+ }
+
if (fsck_err_on(!i, c, dirent_in_missing_dir_inode,
"dirent in nonexisting directory:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
goto out;
}
@@ -1953,21 +2085,20 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (k.k->type != KEY_TYPE_dirent)
goto out;
- d = bkey_s_c_to_dirent(k);
+ struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
if (d.v->d_type == DT_SUBVOL) {
ret = check_dirent_to_subvol(trans, iter, d);
if (ret)
goto err;
} else {
- ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
+ ret = get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
if (ret)
goto err;
if (fsck_err_on(!target->inodes.nr,
c, dirent_to_missing_inode,
- "dirent points to missing inode: (equiv %u)\n%s",
- equiv.snapshot,
+ "dirent points to missing inode:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k),
buf.buf))) {
@@ -1984,7 +2115,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
}
if (d.v->d_type == DT_DIR)
- for_each_visible_inode(c, s, dir, equiv.snapshot, i)
+ for_each_visible_inode(c, s, dir, d.k->p.snapshot, i)
i->count++;
}
out:
@@ -2011,7 +2142,7 @@ int bch2_check_dirents(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@@ -2075,7 +2206,7 @@ int bch2_check_xattrs(struct bch_fs *c)
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@@ -2098,17 +2229,21 @@ static int check_root_trans(struct btree_trans *trans)
if (mustfix_fsck_err_on(ret, c, root_subvol_missing,
"root subvol missing")) {
- struct bkey_i_subvolume root_subvol;
+ struct bkey_i_subvolume *root_subvol =
+ bch2_trans_kmalloc(trans, sizeof(*root_subvol));
+ ret = PTR_ERR_OR_ZERO(root_subvol);
+ if (ret)
+ goto err;
snapshot = U32_MAX;
inum = BCACHEFS_ROOT_INO;
- bkey_subvolume_init(&root_subvol.k_i);
- root_subvol.k.p.offset = BCACHEFS_ROOT_SUBVOL;
- root_subvol.v.flags = 0;
- root_subvol.v.snapshot = cpu_to_le32(snapshot);
- root_subvol.v.inode = cpu_to_le64(inum);
- ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol.k_i, 0);
+ bkey_subvolume_init(&root_subvol->k_i);
+ root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL;
+ root_subvol->v.flags = 0;
+ root_subvol->v.snapshot = cpu_to_le32(snapshot);
+ root_subvol->v.inode = cpu_to_le64(inum);
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0);
bch_err_msg(c, ret, "writing root subvol");
if (ret)
goto err;
@@ -2238,7 +2373,7 @@ int bch2_check_subvolume_structure(struct bch_fs *c)
{
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
- BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
+ BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_subvol_path(trans, &iter, k)));
bch_err_fn(c, ret);
@@ -2273,7 +2408,7 @@ static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c ino
struct btree_iter inode_iter = {};
struct bch_inode_unpacked inode;
struct printbuf buf = PRINTBUF;
- u32 snapshot = bch2_snapshot_equiv(c, inode_k.k->p.snapshot);
+ u32 snapshot = inode_k.k->p.snapshot;
int ret = 0;
p->nr = 0;
@@ -2375,9 +2510,9 @@ int bch2_check_directory_structure(struct bch_fs *c)
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
- BTREE_ITER_INTENT|
- BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_intent|
+ BTREE_ITER_prefetch|
+ BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
if (!bkey_is_inode(k.k))
continue;
@@ -2477,9 +2612,9 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_inodes,
POS(0, start),
- BTREE_ITER_INTENT|
- BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS, k, ({
+ BTREE_ITER_intent|
+ BTREE_ITER_prefetch|
+ BTREE_ITER_all_snapshots, k, ({
if (!bkey_is_inode(k.k))
continue;
@@ -2520,9 +2655,9 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
- BTREE_ITER_INTENT|
- BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS, k, ({
+ BTREE_ITER_intent|
+ BTREE_ITER_prefetch|
+ BTREE_ITER_all_snapshots, k, ({
ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
if (ret)
break;
@@ -2533,8 +2668,7 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links
if (d.v->d_type != DT_DIR &&
d.v->d_type != DT_SUBVOL)
inc_link(c, &s, links, range_start, range_end,
- le64_to_cpu(d.v->d_inum),
- bch2_snapshot_equiv(c, d.k->p.snapshot));
+ le64_to_cpu(d.v->d_inum), d.k->p.snapshot);
}
0;
})));
@@ -2597,7 +2731,7 @@ static int check_nlinks_update_hardlinks(struct bch_fs *c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS(0, range_start),
- BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
if (ret < 0) {
@@ -2665,7 +2799,7 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
u->v.front_pad = 0;
u->v.back_pad = 0;
- return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
+ return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun);
}
int bch2_fix_reflink_p(struct bch_fs *c)
@@ -2676,8 +2810,8 @@ int bch2_fix_reflink_p(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_extents, POS_MIN,
- BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_intent|BTREE_ITER_prefetch|
+ BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
fix_reflink_p_key(trans, &iter, k)));
bch_err_fn(c, ret);
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 2b5e06770ab3..aafa79fa6351 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -339,7 +339,7 @@ int bch2_inode_peek_nowarn(struct btree_trans *trans,
k = bch2_bkey_get_iter(trans, iter, BTREE_ID_inodes,
SPOS(0, inum.inum, snapshot),
- flags|BTREE_ITER_CACHED);
+ flags|BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
return ret;
@@ -371,7 +371,7 @@ int bch2_inode_peek(struct btree_trans *trans,
int bch2_inode_write_flags(struct btree_trans *trans,
struct btree_iter *iter,
struct bch_inode_unpacked *inode,
- enum btree_update_flags flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bkey_inode_buf *inode_p;
@@ -399,7 +399,7 @@ int __bch2_fsck_write_inode(struct btree_trans *trans,
return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
&inode_p->inode.k_i,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
}
int bch2_fsck_write_inode(struct btree_trans *trans,
@@ -473,7 +473,7 @@ fsck_err:
}
int bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
@@ -490,7 +490,7 @@ fsck_err:
}
int bch2_inode_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
@@ -507,7 +507,7 @@ fsck_err:
}
int bch2_inode_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
@@ -535,29 +535,19 @@ static void __bch2_inode_unpacked_to_text(struct printbuf *out,
struct bch_inode_unpacked *inode)
{
printbuf_indent_add(out, 2);
- prt_printf(out, "mode=%o", inode->bi_mode);
- prt_newline(out);
+ prt_printf(out, "mode=%o\n", inode->bi_mode);
prt_str(out, "flags=");
prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1));
- prt_printf(out, " (%x)", inode->bi_flags);
- prt_newline(out);
+ prt_printf(out, " (%x)\n", inode->bi_flags);
- prt_printf(out, "journal_seq=%llu", inode->bi_journal_seq);
- prt_newline(out);
-
- prt_printf(out, "bi_size=%llu", inode->bi_size);
- prt_newline(out);
-
- prt_printf(out, "bi_sectors=%llu", inode->bi_sectors);
- prt_newline(out);
-
- prt_newline(out);
- prt_printf(out, "bi_version=%llu", inode->bi_version);
+ prt_printf(out, "journal_seq=%llu\n", inode->bi_journal_seq);
+ prt_printf(out, "bi_size=%llu\n", inode->bi_size);
+ prt_printf(out, "bi_sectors=%llu\n", inode->bi_sectors);
+ prt_printf(out, "bi_version=%llu\n", inode->bi_version);
#define x(_name, _bits) \
- prt_printf(out, #_name "=%llu", (u64) inode->_name); \
- prt_newline(out);
+ prt_printf(out, #_name "=%llu\n", (u64) inode->_name);
BCH_INODE_FIELDS_v3()
#undef x
printbuf_indent_sub(out, 2);
@@ -604,11 +594,11 @@ int bch2_trigger_inode(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old,
struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
- s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
+ s64 nr = (s64) bkey_is_inode(new.k) - (s64) bkey_is_inode(old.k);
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ if (flags & BTREE_TRIGGER_transactional) {
if (nr) {
int ret = bch2_replicas_deltas_realloc(trans, 0);
if (ret)
@@ -627,13 +617,13 @@ int bch2_trigger_inode(struct btree_trans *trans,
}
}
- if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
+ if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
BUG_ON(!trans->journal_res.seq);
bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq);
}
- if (flags & BTREE_TRIGGER_GC) {
+ if (flags & BTREE_TRIGGER_gc) {
struct bch_fs *c = trans->c;
percpu_down_read(&c->mark_lock);
@@ -645,7 +635,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
}
int bch2_inode_generation_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
int ret = 0;
@@ -762,8 +752,8 @@ int bch2_inode_create(struct btree_trans *trans,
pos = start;
bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos),
- BTREE_ITER_ALL_SNAPSHOTS|
- BTREE_ITER_INTENT);
+ BTREE_ITER_all_snapshots|
+ BTREE_ITER_intent);
again:
while ((k = bch2_btree_iter_peek(iter)).k &&
!(ret = bkey_err(k)) &&
@@ -824,7 +814,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
* extent iterator:
*/
bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
while (1) {
bch2_trans_begin(trans);
@@ -846,7 +836,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
bkey_init(&delete.k);
delete.k.p = iter.pos;
- if (iter.flags & BTREE_ITER_IS_EXTENTS)
+ if (iter.flags & BTREE_ITER_is_extents)
bch2_key_resize(&delete.k,
bpos_min(end, k.k->p).offset -
iter.pos.offset);
@@ -895,7 +885,7 @@ retry:
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
SPOS(0, inum.inum, snapshot),
- BTREE_ITER_INTENT|BTREE_ITER_CACHED);
+ BTREE_ITER_intent|BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1055,7 +1045,7 @@ retry:
bch2_trans_begin(trans);
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, inum, snapshot), BTREE_ITER_INTENT);
+ SPOS(0, inum, snapshot), BTREE_ITER_intent);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1100,7 +1090,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
struct bch_inode_unpacked inode;
int ret;
- k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_CACHED);
+ k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
return ret;
@@ -1152,7 +1142,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
inode.bi_flags &= ~BCH_INODE_unlinked;
ret = bch2_inode_write_flags(trans, &inode_iter, &inode,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
bch_err_msg(c, ret, "clearing inode unlinked flag");
if (ret)
goto out;
@@ -1199,7 +1189,7 @@ again:
* flushed and we'd spin:
*/
ret = for_each_btree_key_commit(trans, iter, BTREE_ID_deleted_inodes, POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
ret = may_delete_deleted_inode(trans, &iter, k.k->p, &need_another_pass);
if (ret > 0) {
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index 056298050550..679f5f5e5d15 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -6,19 +6,20 @@
#include "bkey_methods.h"
#include "opts.h"
-enum bkey_invalid_flags;
+enum bch_validate_flags;
extern const char * const bch2_inode_opts[];
int bch2_inode_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int bch2_inode_v2_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int bch2_inode_v3_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_trigger_inode(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_inode ((struct bkey_ops) { \
.key_invalid = bch2_inode_invalid, \
@@ -49,7 +50,7 @@ static inline bool bkey_is_inode(const struct bkey *k)
}
int bch2_inode_generation_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_inode_generation ((struct bkey_ops) { \
@@ -101,7 +102,7 @@ int bch2_inode_peek(struct btree_trans *, struct btree_iter *,
struct bch_inode_unpacked *, subvol_inum, unsigned);
int bch2_inode_write_flags(struct btree_trans *, struct btree_iter *,
- struct bch_inode_unpacked *, enum btree_update_flags);
+ struct bch_inode_unpacked *, enum btree_iter_update_trigger_flags);
static inline int bch2_inode_write(struct btree_trans *trans,
struct btree_iter *iter,
@@ -220,6 +221,14 @@ static inline void bch2_inode_nlink_set(struct bch_inode_unpacked *bi,
int bch2_inode_nlink_inc(struct bch_inode_unpacked *);
void bch2_inode_nlink_dec(struct btree_trans *, struct bch_inode_unpacked *);
+static inline bool bch2_inode_should_have_bp(struct bch_inode_unpacked *inode)
+{
+ bool inode_has_bp = inode->bi_dir || inode->bi_dir_offset;
+
+ return S_ISDIR(inode->bi_mode) ||
+ (!inode->bi_nlink && inode_has_bp);
+}
+
struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *);
void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *,
struct bch_inode_unpacked *);
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 1baf78594cca..4ec979b4b23e 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -198,7 +198,7 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
POS(inum.inum, start),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);
@@ -230,7 +230,7 @@ static int truncate_set_isize(struct btree_trans *trans,
struct bch_inode_unpacked inode_u;
int ret;
- ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT) ?:
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent) ?:
(inode_u.bi_size = new_i_size, 0) ?:
bch2_inode_write(trans, &iter, &inode_u);
@@ -256,7 +256,7 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
bch2_trans_iter_exit(trans, &fpunch_iter);
@@ -264,6 +264,7 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,
ret = 0;
err:
bch2_logged_op_finish(trans, op_k);
+ bch_err_fn(c, ret);
return ret;
}
@@ -316,7 +317,7 @@ static int adjust_i_size(struct btree_trans *trans, subvol_inum inum, u64 offset
offset <<= 9;
len <<= 9;
- ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT);
+ ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent);
if (ret)
return ret;
@@ -364,7 +365,7 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
POS(inum.inum, 0),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
switch (op->v.state) {
case LOGGED_OP_FINSERT_start:
@@ -476,6 +477,7 @@ case LOGGED_OP_FINSERT_finish:
break;
}
err:
+ bch_err_fn(c, ret);
bch2_logged_op_finish(trans, op_k);
bch2_trans_iter_exit(trans, &iter);
return ret;
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 8a556e6d1ab6..f57486794484 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -378,7 +378,7 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
bch2_bkey_buf_init(&sk);
bch2_trans_iter_init(trans, &iter, rbio->data_btree,
- rbio->read_pos, BTREE_ITER_SLOTS);
+ rbio->read_pos, BTREE_ITER_slots);
retry:
rbio->bio.bi_status = 0;
@@ -487,7 +487,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
return 0;
k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ BTREE_ITER_slots|BTREE_ITER_intent);
if ((ret = bkey_err(k)))
goto out;
@@ -523,7 +523,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
goto out;
ret = bch2_trans_update(trans, &iter, new,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
out:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -541,7 +541,6 @@ static void __bch2_read_endio(struct work_struct *work)
struct bch_read_bio *rbio =
container_of(work, struct bch_read_bio, work);
struct bch_fs *c = rbio->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
struct bio *src = &rbio->bio;
struct bio *dst = &bch2_rbio_parent(rbio)->bio;
struct bvec_iter dst_iter = rbio->bvec_iter;
@@ -647,13 +646,15 @@ csum_err:
prt_str(&buf, "data ");
bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum);
- bch_err_inum_offset_ratelimited(ca,
- rbio->read_pos.inode,
- rbio->read_pos.offset << 9,
- "data %s", buf.buf);
+ struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
+ if (ca) {
+ bch_err_inum_offset_ratelimited(ca,
+ rbio->read_pos.inode,
+ rbio->read_pos.offset << 9,
+ "data %s", buf.buf);
+ bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+ }
printbuf_exit(&buf);
-
- bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
goto out;
decompression_err:
@@ -675,7 +676,7 @@ static void bch2_read_endio(struct bio *bio)
struct bch_read_bio *rbio =
container_of(bio, struct bch_read_bio, bio);
struct bch_fs *c = rbio->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
+ struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
struct workqueue_struct *wq = NULL;
enum rbio_context context = RBIO_CONTEXT_NULL;
@@ -687,17 +688,21 @@ static void bch2_read_endio(struct bio *bio)
if (!rbio->split)
rbio->bio.bi_end_io = rbio->end_io;
- if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
- rbio->read_pos.inode,
- rbio->read_pos.offset,
- "data read error: %s",
- bch2_blk_status_to_str(bio->bi_status))) {
+ if (bio->bi_status) {
+ if (ca) {
+ bch_err_inum_offset_ratelimited(ca,
+ rbio->read_pos.inode,
+ rbio->read_pos.offset,
+ "data read error: %s",
+ bch2_blk_status_to_str(bio->bi_status));
+ bch2_io_error(ca, BCH_MEMBER_ERROR_read);
+ }
bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
return;
}
if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
- ptr_stale(ca, &rbio->pick.ptr)) {
+ (ca && dev_ptr_stale(ca, &rbio->pick.ptr))) {
trace_and_count(c, read_reuse_race, &rbio->bio);
if (rbio->flags & BCH_READ_RETRY_IF_STALE)
@@ -758,22 +763,21 @@ err:
}
static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
+ struct bch_dev *ca,
struct bkey_s_c k,
struct bch_extent_ptr ptr)
{
struct bch_fs *c = trans->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
struct btree_iter iter;
struct printbuf buf = PRINTBUF;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
- PTR_BUCKET_POS(c, &ptr),
- BTREE_ITER_CACHED);
+ PTR_BUCKET_POS(ca, &ptr),
+ BTREE_ITER_cached);
- prt_printf(&buf, "Attempting to read from stale dirty pointer:");
+ prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
printbuf_indent_add(&buf, 2);
- prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
prt_newline(&buf);
@@ -801,7 +805,6 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
struct bch_fs *c = trans->c;
struct extent_ptr_decoded pick;
struct bch_read_bio *rbio = NULL;
- struct bch_dev *ca = NULL;
struct promote_op *promote = NULL;
bool bounce = false, read_full = false, narrow_crcs = false;
struct bpos data_pos = bkey_start_pos(k.k);
@@ -832,7 +835,7 @@ retry_pick:
goto err;
}
- ca = bch_dev_bkey_exists(c, pick.ptr.dev);
+ struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
/*
* Stale dirty pointers are treated as IO errors, but @failed isn't
@@ -842,9 +845,11 @@ retry_pick:
*/
if ((flags & BCH_READ_IN_RETRY) &&
!pick.ptr.cached &&
- unlikely(ptr_stale(ca, &pick.ptr))) {
- read_from_stale_dirty_pointer(trans, k, pick.ptr);
+ ca &&
+ unlikely(dev_ptr_stale(ca, &pick.ptr))) {
+ read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
bch2_mark_io_failure(failed, &pick);
+ percpu_ref_put(&ca->io_ref);
goto retry_pick;
}
@@ -859,8 +864,11 @@ retry_pick:
* can happen if we retry, and the extent we were going to read
* has been merged in the meantime:
*/
- if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
+ if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS) {
+ if (ca)
+ percpu_ref_put(&ca->io_ref);
goto hole;
+ }
iter.bi_size = pick.crc.compressed_size << 9;
goto get_bio;
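This hunk changes the device ref lifetime: bch2_dev_get_ioref() is now taken when the pointer is picked (and may return NULL if the device is gone), so every path that loops back to retry_pick must drop the ref first, as the stale-pointer branch above does. Schematically (helper names hypothetical, not the bcachefs API):

retry_pick:
	ca = dev_get_ioref(c, ptr.dev);		/* may be NULL */
	if (ca && ptr_is_stale(ca, &ptr)) {
		mark_io_failure(&failed, &ptr);
		dev_put_ioref(ca);		/* drop before retrying */
		goto retry_pick;
	}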
@@ -965,7 +973,7 @@ get_bio:
rbio->bvec_iter = iter;
rbio->offset_into_extent= offset_into_extent;
rbio->flags = flags;
- rbio->have_ioref = pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
+ rbio->have_ioref = ca != NULL;
rbio->narrow_crcs = narrow_crcs;
rbio->hole = 0;
rbio->retry = 0;
@@ -995,7 +1003,7 @@ get_bio:
* If it's being moved internally, we don't want to flag it as a cache
* hit:
*/
- if (pick.ptr.cached && !(flags & BCH_READ_NODECODE))
+ if (ca && pick.ptr.cached && !(flags & BCH_READ_NODECODE))
bch2_bucket_io_time_reset(trans, pick.ptr.dev,
PTR_BUCKET_NR(ca, &pick.ptr), READ);
@@ -1113,7 +1121,7 @@ retry:
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
- BTREE_ITER_SLOTS);
+ BTREE_ITER_slots);
while (1) {
unsigned bytes, sectors, offset_into_extent;
enum btree_id data_btree = BTREE_ID_extents;
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index f137252bccc5..9401d13e31bb 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -166,7 +166,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
bch2_trans_copy_iter(&iter, extent_iter);
for_each_btree_key_upto_continue_norestart(iter,
- new->k.p, BTREE_ITER_SLOTS, old, ret) {
+ new->k.p, BTREE_ITER_slots, old, ret) {
s64 sectors = min(new->k.p.offset, old.k->p.offset) -
max(bkey_start_offset(&new->k),
bkey_start_offset(old.k));
@@ -199,9 +199,6 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
u64 new_i_size,
s64 i_sectors_delta)
{
- struct btree_iter iter;
- struct bkey_i *k;
- struct bkey_i_inode_v3 *inode;
/*
* Crazy performance optimization:
* Every extent update needs to also update the inode: the inode trigger
@@ -213,26 +210,37 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
* to be journalled - if we crash, the bi_journal_seq update will be
* lost, but that's fine.
*/
- unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
- int ret;
+ unsigned inode_update_flags = BTREE_UPDATE_nojournal;
- k = bch2_bkey_get_mut_noupdate(trans, &iter, BTREE_ID_inodes,
+ struct btree_iter iter;
+ struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
SPOS(0,
extent_iter->pos.inode,
extent_iter->snapshot),
- BTREE_ITER_CACHED);
- ret = PTR_ERR_OR_ZERO(k);
+ BTREE_ITER_cached);
+ int ret = bkey_err(k);
if (unlikely(ret))
return ret;
- if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
- k = bch2_inode_to_v3(trans, k);
- ret = PTR_ERR_OR_ZERO(k);
+ /*
+ * varint_decode_fast(), in the inode .invalid method, reads up to 7
+ * bytes past the end of the buffer:
+ */
+ struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8);
+ ret = PTR_ERR_OR_ZERO(k_mut);
+ if (unlikely(ret))
+ goto err;
+
+ bkey_reassemble(k_mut, k);
+
+ if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) {
+ k_mut = bch2_inode_to_v3(trans, k_mut);
+ ret = PTR_ERR_OR_ZERO(k_mut);
if (unlikely(ret))
goto err;
}
- inode = bkey_i_to_inode_v3(k);
+ struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut);
if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
new_i_size > le64_to_cpu(inode->v.bi_size)) {
@@ -251,7 +259,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
}
ret = bch2_trans_update(trans, &iter, &inode->k_i,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+ BTREE_UPDATE_internal_snapshot_node|
inode_update_flags);
err:
bch2_trans_iter_exit(trans, &iter);
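The slack in the bch2_trans_kmalloc_nomemzero() size above matters: a fast varint decoder that may read up to 7 bytes past the encoded data is safe only if the buffer carries at least that much padding. The same idea in plain C (copy_with_decode_slack is illustrative, not a bcachefs helper):

#include <stdlib.h>
#include <string.h>

/* Copy with 8 spare bytes so a 7-byte decoder overread stays in bounds. */
static void *copy_with_decode_slack(const void *src, size_t len)
{
	void *buf = malloc(len + 8);

	if (buf)
		memcpy(buf, src, len);
	return buf;
}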
@@ -360,7 +368,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
bkey_start_pos(&sk.k->k),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ BTREE_ITER_slots|BTREE_ITER_intent);
ret = bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
bch2_extent_update(trans, inum, &iter, sk.k,
@@ -399,13 +407,12 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
BUG_ON(c->opts.nochanges);
bkey_for_each_ptr(ptrs, ptr) {
- BUG_ON(!bch2_dev_exists2(c, ptr->dev));
-
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bch_dev *ca = nocow
+ ? bch2_dev_have_ref(c, ptr->dev)
+ : bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
if (to_entry(ptr + 1) < ptrs.end) {
- n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
- GFP_NOFS, &ca->replica_set));
+ n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));
n->bio.bi_end_io = wbio->bio.bi_end_io;
n->bio.bi_private = wbio->bio.bi_private;
@@ -422,11 +429,12 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
n->c = c;
n->dev = ptr->dev;
- n->have_ioref = nocow || bch2_dev_get_ioref(ca,
- type == BCH_DATA_btree ? READ : WRITE);
+ n->have_ioref = ca != NULL;
n->nocow = nocow;
n->submit_time = local_clock();
n->inode_offset = bkey_start_offset(&k->k);
+ if (nocow)
+ n->nocow_bucket = PTR_BUCKET_NR(ca, ptr);
n->bio.bi_iter.bi_sector = ptr->offset;
if (likely(n->have_ioref)) {
@@ -473,7 +481,6 @@ static void bch2_write_done(struct closure *cl)
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
struct keylist *keys = &op->insert_keys;
- struct bch_extent_ptr *ptr;
struct bkey_i *src, *dst = keys->keys, *n;
for (src = keys->keys; src != keys->top; src = n) {
@@ -642,7 +649,9 @@ static void bch2_write_endio(struct bio *bio)
struct bch_write_bio *wbio = to_wbio(bio);
struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
struct bch_fs *c = wbio->c;
- struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
+ struct bch_dev *ca = wbio->have_ioref
+ ? bch2_dev_have_ref(c, wbio->dev)
+ : NULL;
if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
op->pos.inode,
@@ -653,8 +662,12 @@ static void bch2_write_endio(struct bio *bio)
op->flags |= BCH_WRITE_IO_ERROR;
}
- if (wbio->nocow)
+ if (wbio->nocow) {
+ bch2_bucket_nocow_unlock(&c->nocow_locks,
+ POS(ca->dev_idx, wbio->nocow_bucket),
+ BUCKET_NOCOW_LOCK_UPDATE);
set_bit(wbio->dev, op->devs_need_flush->d);
+ }
if (wbio->have_ioref) {
bch2_latency_acct(ca, wbio->submit_time, WRITE);
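The hunks above (together with the nocow_bucket field added to struct bch_write_bio later in this diff) move nocow bucket unlocking from a keylist walk after the write into the IO completion path: the bucket is recorded on the bio at submit time so endio can unlock exactly what was locked. In outline (lock_bucket/unlock_bucket are hypothetical stand-ins for the nocow lock calls):

struct wbio_sketch { u64 nocow_bucket; /* ... */ };

static void submit_nocow(struct wbio_sketch *w, u64 bucket)
{
	w->nocow_bucket = bucket;	/* remember what we locked */
	lock_bucket(bucket);
	/* ... issue the IO ... */
}

static void nocow_endio(struct wbio_sketch *w)
{
	unlock_bucket(w->nocow_bucket);	/* release on completion */
}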
@@ -1093,30 +1106,21 @@ static bool bch2_extent_is_writeable(struct bch_write_op *op,
return false;
e = bkey_s_c_to_extent(k);
+
+ rcu_read_lock();
extent_for_each_ptr_decode(e, p, entry) {
- if (crc_is_encoded(p.crc) || p.has_ec)
+ if (crc_is_encoded(p.crc) || p.has_ec) {
+ rcu_read_unlock();
return false;
+ }
replicas += bch2_extent_ptr_durability(c, &p);
}
+ rcu_read_unlock();
return replicas >= op->opts.data_replicas;
}
-static inline void bch2_nocow_write_unlock(struct bch_write_op *op)
-{
- struct bch_fs *c = op->c;
-
- for_each_keylist_key(&op->insert_keys, k) {
- struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
-
- bkey_for_each_ptr(ptrs, ptr)
- bch2_bucket_nocow_unlock(&c->nocow_locks,
- PTR_BUCKET_POS(c, ptr),
- BUCKET_NOCOW_LOCK_UPDATE);
- }
-}
-
static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_i *orig,
@@ -1150,7 +1154,7 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
return bch2_extent_update_i_size_sectors(trans, iter,
min(new->k.p.offset << 9, new_i_size), 0) ?:
bch2_trans_update(trans, iter, new,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
}
static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
@@ -1161,7 +1165,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
for_each_keylist_key(&op->insert_keys, orig) {
int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
bkey_start_pos(&orig->k), orig->k.p,
- BTREE_ITER_INTENT, k,
+ BTREE_ITER_intent, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
}));
@@ -1187,8 +1191,6 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
static void __bch2_nocow_write_done(struct bch_write_op *op)
{
- bch2_nocow_write_unlock(op);
-
if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
op->error = -EIO;
} else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
@@ -1234,12 +1236,16 @@ retry:
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(op->pos.inode, op->pos.offset, snapshot),
- BTREE_ITER_SLOTS);
+ BTREE_ITER_slots);
while (1) {
struct bio *bio = &op->wbio.bio;
buckets.nr = 0;
+ ret = bch2_trans_relock(trans);
+ if (ret)
+ break;
+
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
@@ -1259,14 +1265,15 @@ retry:
/* Get iorefs before dropping btree locks: */
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr(ptrs, ptr) {
- struct bpos b = PTR_BUCKET_POS(c, ptr);
+ struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
+ if (unlikely(!ca))
+ goto err_get_ioref;
+
+ struct bpos b = PTR_BUCKET_POS(ca, ptr);
struct nocow_lock_bucket *l =
bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
prefetch(l);
- if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
- goto err_get_ioref;
-
/* XXX allocating memory with btree locks held - rare */
darray_push_gfp(&buckets, ((struct bucket_to_lock) {
.b = b, .gen = ptr->gen, .l = l,
@@ -1285,7 +1292,7 @@ retry:
bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
darray_for_each(buckets, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, i->b.inode);
+ struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode);
__bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
bucket_to_u64(i->b),
@@ -1362,7 +1369,7 @@ err:
return;
err_get_ioref:
darray_for_each(buckets, i)
- percpu_ref_put(&bch_dev_bkey_exists(c, i->b.inode)->io_ref);
+ percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref);
/* Fall back to COW path: */
goto out;
@@ -1483,7 +1490,11 @@ err:
if ((op->flags & BCH_WRITE_SYNC) ||
(!(op->flags & BCH_WRITE_DONE) &&
!(op->flags & BCH_WRITE_IN_WORKER))) {
- closure_sync(&op->cl);
+ if (closure_sync_timeout(&op->cl, HZ * 10)) {
+ bch2_print_allocator_stuck(c);
+ closure_sync(&op->cl);
+ }
+
__bch2_write_index(op);
if (!(op->flags & BCH_WRITE_DONE))
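The change above turns an unbounded closure_sync() into a watchdog: wait up to ten seconds, and only if that times out print allocator diagnostics before falling back to the unbounded wait. The general pattern (wait_timed_out/report_stuck/wait_forever are hypothetical):

	if (wait_timed_out(done, 10 * HZ)) {	/* true on timeout */
		report_stuck();			/* diagnostics */
		wait_forever(done);		/* original unbounded wait */
	}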
@@ -1505,6 +1516,8 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
unsigned sectors;
int ret;
+ memset(&op->failed, 0, sizeof(op->failed));
+
op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
op->flags |= BCH_WRITE_DONE;
@@ -1639,8 +1652,7 @@ void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
prt_bitflags(out, bch2_write_flags, op->flags);
prt_newline(out);
- prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
- prt_newline(out);
+ prt_printf(out, "ref: %u\n", closure_nr_remaining(&op->cl));
printbuf_indent_sub(out, 2);
}
@@ -1648,13 +1660,14 @@ void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
void bch2_fs_io_write_exit(struct bch_fs *c)
{
mempool_exit(&c->bio_bounce_pages);
+ bioset_exit(&c->replica_set);
bioset_exit(&c->bio_write);
}
int bch2_fs_io_write_init(struct bch_fs *c)
{
- if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
- BIOSET_NEED_BVECS))
+ if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
+ bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
return -BCH_ERR_ENOMEM_bio_write_init;
if (mempool_init_page_pool(&c->bio_bounce_pages,
diff --git a/fs/bcachefs/io_write_types.h b/fs/bcachefs/io_write_types.h
index c7f97c2c4805..6e878a6f2f0b 100644
--- a/fs/bcachefs/io_write_types.h
+++ b/fs/bcachefs/io_write_types.h
@@ -20,6 +20,7 @@ struct bch_write_bio {
u64 submit_time;
u64 inode_offset;
+ u64 nocow_bucket;
struct bch_devs_list failed;
u8 dev;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 9c9a25dbd613..adec8e1ea73e 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -53,29 +53,19 @@ static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u6
unsigned i = seq & JOURNAL_BUF_MASK;
struct journal_buf *buf = j->buf + i;
- prt_str(out, "seq:");
- prt_tab(out);
- prt_printf(out, "%llu", seq);
- prt_newline(out);
+ prt_printf(out, "seq:\t%llu\n", seq);
printbuf_indent_add(out, 2);
- prt_str(out, "refcount:");
- prt_tab(out);
- prt_printf(out, "%u", journal_state_count(s, i));
- prt_newline(out);
+ prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));
- prt_str(out, "size:");
- prt_tab(out);
+ prt_printf(out, "size:\t");
prt_human_readable_u64(out, vstruct_bytes(buf->data));
prt_newline(out);
- prt_str(out, "expires:");
- prt_tab(out);
- prt_printf(out, "%li jiffies", buf->expires - jiffies);
- prt_newline(out);
+ prt_printf(out, "expires:\t");
+ prt_printf(out, "%li jiffies\n", buf->expires - jiffies);
- prt_str(out, "flags:");
- prt_tab(out);
+ prt_printf(out, "flags:\t");
if (buf->noflush)
prt_str(out, "noflush ");
if (buf->must_flush)
@@ -87,9 +77,9 @@ static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u6
if (buf->write_started)
prt_str(out, "write_started ");
if (buf->write_allocated)
- prt_str(out, "write allocated ");
+ prt_str(out, "write_allocated ");
if (buf->write_done)
- prt_str(out, "write done");
+ prt_str(out, "write_done");
prt_newline(out);
printbuf_indent_sub(out, 2);
@@ -706,6 +696,12 @@ recheck_need_open:
spin_unlock(&j->lock);
+ /*
+ * We're called from bch2_journal_flush_seq() -> wait_event(),
+ * but the res_get below might block. It usually won't, so we
+ * won't livelock:
+ */
+ sched_annotate_sleep();
ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
if (ret)
return ret;
@@ -870,6 +866,8 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou
{
struct journal_buf *ret = NULL;
+ /* We're inside wait_event(), but using mutex_lock(): */
+ sched_annotate_sleep();
mutex_lock(&j->buf_lock);
spin_lock(&j->lock);
max_seq = min(max_seq, journal_cur_seq(j));
@@ -940,7 +938,8 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
break;
}
} else {
- ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
+ ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
+ BCH_DATA_journal, cl);
ret = PTR_ERR_OR_ZERO(ob[nr_got]);
if (ret)
break;
@@ -948,7 +947,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
ret = bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,
ob[nr_got]->bucket, BCH_DATA_journal,
- ca->mi.bucket_size));
+ ca->mi.bucket_size, BTREE_TRIGGER_transactional));
if (ret) {
bch2_open_bucket_put(c, ob[nr_got]);
bch_err_msg(c, ret, "marking new journal buckets");
@@ -1028,7 +1027,8 @@ err_unblock:
for (i = 0; i < nr_got; i++)
bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,
- bu[i], BCH_DATA_free, 0));
+ bu[i], BCH_DATA_free, 0,
+ BTREE_TRIGGER_transactional));
err_free:
if (!new_fs)
for (i = 0; i < nr_got; i++)
@@ -1179,12 +1179,14 @@ void bch2_fs_journal_stop(struct journal *j)
bch2_journal_meta(j);
journal_quiesce(j);
+ cancel_delayed_work_sync(&j->write_work);
BUG_ON(!bch2_journal_error(j) &&
- test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
+ test_bit(JOURNAL_replay_done, &j->flags) &&
j->last_empty_seq != journal_cur_seq(j));
- cancel_delayed_work_sync(&j->write_work);
+ if (!bch2_journal_error(j))
+ clear_bit(JOURNAL_running, &j->flags);
}
int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
@@ -1258,7 +1260,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
spin_lock(&j->lock);
- set_bit(JOURNAL_STARTED, &j->flags);
+ set_bit(JOURNAL_running, &j->flags);
j->last_flush_write = jiffies;
j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
@@ -1399,6 +1401,13 @@ int bch2_fs_journal_init(struct journal *j)
/* debug: */
+static const char * const bch2_journal_flags_strs[] = {
+#define x(n) #n,
+ JOURNAL_FLAGS()
+#undef x
+ NULL
+};
+
void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
@@ -1407,19 +1416,22 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
if (!out->nr_tabstops)
- printbuf_tabstop_push(out, 24);
+ printbuf_tabstop_push(out, 28);
out->atomic++;
rcu_read_lock();
s = READ_ONCE(j->reservations);
+ prt_printf(out, "flags:\t");
+ prt_bitflags(out, bch2_journal_flags_strs, j->flags);
+ prt_newline(out);
prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
- prt_printf(out, "seq:\t\t\t%llu\n", journal_cur_seq(j));
- prt_printf(out, "seq_ondisk:\t\t%llu\n", j->seq_ondisk);
- prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j));
+ prt_printf(out, "seq:\t%llu\n", journal_cur_seq(j));
+ prt_printf(out, "seq_ondisk:\t%llu\n", j->seq_ondisk);
+ prt_printf(out, "last_seq:\t%llu\n", journal_last_seq(j));
prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
- prt_printf(out, "watermark:\t\t%s\n", bch2_watermarks[j->watermark]);
+ prt_printf(out, "watermark:\t%s\n", bch2_watermarks[j->watermark]);
prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
@@ -1428,48 +1440,44 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
prt_newline(out);
prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
- prt_printf(out, "reclaim kicked:\t\t%u\n", j->reclaim_kicked);
+ prt_printf(out, "reclaim kicked:\t%u\n", j->reclaim_kicked);
prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
- prt_printf(out, "blocked:\t\t%u\n", j->blocked);
+ prt_printf(out, "blocked:\t%u\n", j->blocked);
prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
- prt_printf(out, "current entry:\t\t");
+ prt_printf(out, "current entry:\t");
switch (s.cur_entry_offset) {
case JOURNAL_ENTRY_ERROR_VAL:
- prt_printf(out, "error");
+ prt_printf(out, "error\n");
break;
case JOURNAL_ENTRY_CLOSED_VAL:
- prt_printf(out, "closed");
+ prt_printf(out, "closed\n");
break;
default:
- prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
+ prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
break;
}
- prt_newline(out);
- prt_printf(out, "unwritten entries:");
- prt_newline(out);
+ prt_printf(out, "unwritten entries:\n");
bch2_journal_bufs_to_text(out, j);
- prt_printf(out,
- "replay done:\t\t%i\n",
- test_bit(JOURNAL_REPLAY_DONE, &j->flags));
-
prt_printf(out, "space:\n");
- prt_printf(out, "\tdiscarded\t%u:%u\n",
+ printbuf_indent_add(out, 2);
+ prt_printf(out, "discarded\t%u:%u\n",
j->space[journal_space_discarded].next_entry,
j->space[journal_space_discarded].total);
- prt_printf(out, "\tclean ondisk\t%u:%u\n",
+ prt_printf(out, "clean ondisk\t%u:%u\n",
j->space[journal_space_clean_ondisk].next_entry,
j->space[journal_space_clean_ondisk].total);
- prt_printf(out, "\tclean\t\t%u:%u\n",
+ prt_printf(out, "clean\t%u:%u\n",
j->space[journal_space_clean].next_entry,
j->space[journal_space_clean].total);
- prt_printf(out, "\ttotal\t\t%u:%u\n",
+ prt_printf(out, "total\t%u:%u\n",
j->space[journal_space_total].next_entry,
j->space[journal_space_total].total);
+ printbuf_indent_sub(out, 2);
for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
struct journal_device *ja = &ca->journal;
@@ -1480,14 +1488,16 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
if (!ja->nr)
continue;
- prt_printf(out, "dev %u:\n", ca->dev_idx);
- prt_printf(out, "\tnr\t\t%u\n", ja->nr);
- prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size);
- prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
- prt_printf(out, "\tdiscard_idx\t%u\n", ja->discard_idx);
- prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
- prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
- prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
+ prt_printf(out, "dev %u:\n", ca->dev_idx);
+ printbuf_indent_add(out, 2);
+ prt_printf(out, "nr\t%u\n", ja->nr);
+ prt_printf(out, "bucket size\t%u\n", ca->mi.bucket_size);
+ prt_printf(out, "available\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
+ prt_printf(out, "discard_idx\t%u\n", ja->discard_idx);
+ prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n",ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
+ prt_printf(out, "dirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
+ prt_printf(out, "cur_idx\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
+ printbuf_indent_sub(out, 2);
}
rcu_read_unlock();
@@ -1519,25 +1529,18 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64
pin_list = journal_seq_pin(j, *seq);
- prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
- prt_newline(out);
+ prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
printbuf_indent_add(out, 2);
for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
- list_for_each_entry(pin, &pin_list->list[i], list) {
- prt_printf(out, "\t%px %ps", pin, pin->flush);
- prt_newline(out);
- }
+ list_for_each_entry(pin, &pin_list->list[i], list)
+ prt_printf(out, "\t%px %ps\n", pin, pin->flush);
- if (!list_empty(&pin_list->flushed)) {
- prt_printf(out, "flushed:");
- prt_newline(out);
- }
+ if (!list_empty(&pin_list->flushed))
+ prt_printf(out, "flushed:\n");
- list_for_each_entry(pin, &pin_list->flushed, list) {
- prt_printf(out, "\t%px %ps", pin, pin->flush);
- prt_newline(out);
- }
+ list_for_each_entry(pin, &pin_list->flushed, list)
+ prt_printf(out, "\t%px %ps\n", pin, pin->flush);
printbuf_indent_sub(out, 2);
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index 7c7528f839c5..fd1f7cdaa8bc 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -372,7 +372,7 @@ static inline int bch2_journal_res_get(struct journal *j, struct journal_res *re
int ret;
EBUG_ON(res->ref);
- EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
+ EBUG_ON(!test_bit(JOURNAL_running, &j->flags));
res->u64s = u64s;
@@ -418,8 +418,8 @@ struct bch_dev;
static inline void bch2_journal_set_replay_done(struct journal *j)
{
- BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
- set_bit(JOURNAL_REPLAY_DONE, &j->flags);
+ BUG_ON(!test_bit(JOURNAL_running, &j->flags));
+ set_bit(JOURNAL_replay_done, &j->flags);
}
void bch2_journal_unblock(struct journal *);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 725fcf46f631..cdcb1ad49af4 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -17,15 +17,38 @@
#include "sb-clean.h"
#include "trace.h"
+void bch2_journal_pos_from_member_info_set(struct bch_fs *c)
+{
+ lockdep_assert_held(&c->sb_lock);
+
+ for_each_member_device(c, ca) {
+ struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+
+ m->last_journal_bucket = cpu_to_le32(ca->journal.cur_idx);
+ m->last_journal_bucket_offset = cpu_to_le32(ca->mi.bucket_size - ca->journal.sectors_free);
+ }
+}
+
+void bch2_journal_pos_from_member_info_resume(struct bch_fs *c)
+{
+ mutex_lock(&c->sb_lock);
+ for_each_member_device(c, ca) {
+ struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
+
+ unsigned idx = le32_to_cpu(m.last_journal_bucket);
+ if (idx < ca->journal.nr)
+ ca->journal.cur_idx = idx;
+ unsigned offset = le32_to_cpu(m.last_journal_bucket_offset);
+ if (offset <= ca->mi.bucket_size)
+ ca->journal.sectors_free = ca->mi.bucket_size - offset;
+ }
+ mutex_unlock(&c->sb_lock);
+}
+
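The resume helper above treats the persisted superblock fields as advisory hints, applying each one only after a sanity check against the device's current journal geometry, so a stale or corrupt superblock cannot push the write position out of bounds. A standalone sketch of the same validate-then-apply pattern (hypothetical names):

```c
#include <stdio.h>

/* Apply persisted journal-position hints only if they still fit the
 * current geometry; otherwise silently keep the defaults. */
static void resume_journal_pos(unsigned *cur_idx, unsigned *sectors_free,
			       unsigned nr_buckets, unsigned bucket_size,
			       unsigned saved_idx, unsigned saved_offset)
{
	if (saved_idx < nr_buckets)		/* ignore a stale bucket index */
		*cur_idx = saved_idx;
	if (saved_offset <= bucket_size)	/* ignore an impossible offset */
		*sectors_free = bucket_size - saved_offset;
}

int main(void)
{
	unsigned cur_idx = 0, sectors_free = 512;

	resume_journal_pos(&cur_idx, &sectors_free, 8, 512, 3, 100);
	printf("idx=%u free=%u\n", cur_idx, sectors_free);	/* idx=3 free=412 */
	return 0;
}
```
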
void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct journal_replay *j)
{
darray_for_each(j->ptrs, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, i->dev);
- u64 offset;
-
- div64_u64_rem(i->sector, ca->mi.bucket_size, &offset);
-
if (i != j->ptrs.data)
prt_printf(out, " ");
prt_printf(out, "%u:%u:%u (sector %llu)",
@@ -122,6 +145,10 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
struct printbuf buf = PRINTBUF;
int ret = JOURNAL_ENTRY_ADD_OK;
+ if (!c->journal.oldest_seq_found_ondisk ||
+ le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk)
+ c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq);
+
/* Is this entry older than the range we need? */
if (!c->opts.read_entire_journal &&
le64_to_cpu(j->seq) < jlist->last_seq)
@@ -247,7 +274,7 @@ static void journal_entry_err_msg(struct printbuf *out,
if (entry) {
prt_str(out, " type=");
- prt_str(out, bch2_jset_entry_types[entry->type]);
+ bch2_prt_jset_entry_type(out, entry->type);
}
if (!jset) {
@@ -272,7 +299,7 @@ static void journal_entry_err_msg(struct printbuf *out,
journal_entry_err_msg(&_buf, version, jset, entry); \
prt_printf(&_buf, msg, ##__VA_ARGS__); \
\
- switch (flags & BKEY_INVALID_WRITE) { \
+ switch (flags & BCH_VALIDATE_write) { \
case READ: \
mustfix_fsck_err(c, _err, "%s", _buf.buf); \
break; \
@@ -301,9 +328,9 @@ static int journal_validate_key(struct bch_fs *c,
unsigned level, enum btree_id btree_id,
struct bkey_i *k,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
- int write = flags & BKEY_INVALID_WRITE;
+ int write = flags & BCH_VALIDATE_write;
void *next = vstruct_next(entry);
struct printbuf buf = PRINTBUF;
int ret = 0;
@@ -376,7 +403,7 @@ static int journal_entry_btree_keys_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
struct bkey_i *k = entry->start;
@@ -385,7 +412,7 @@ static int journal_entry_btree_keys_validate(struct bch_fs *c,
entry->level,
entry->btree_id,
k, version, big_endian,
- flags|BKEY_INVALID_JOURNAL);
+ flags|BCH_VALIDATE_journal);
if (ret == FSCK_DELETED_KEY)
continue;
@@ -403,7 +430,8 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
jset_entry_for_each_key(entry, k) {
if (!first) {
prt_newline(out);
- prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+ bch2_prt_jset_entry_type(out, entry->type);
+ prt_str(out, ": ");
}
prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
@@ -415,7 +443,7 @@ static int journal_entry_btree_root_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
struct bkey_i *k = entry->start;
int ret = 0;
@@ -454,7 +482,7 @@ static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
/* obsolete, don't care: */
return 0;
@@ -469,7 +497,7 @@ static int journal_entry_blacklist_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
int ret = 0;
@@ -496,7 +524,7 @@ static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
struct jset_entry_blacklist_v2 *bl_entry;
int ret = 0;
@@ -538,7 +566,7 @@ static int journal_entry_usage_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
struct jset_entry_usage *u =
container_of(entry, struct jset_entry_usage, entry);
@@ -563,16 +591,16 @@ static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry_usage *u =
container_of(entry, struct jset_entry_usage, entry);
- prt_printf(out, "type=%s v=%llu",
- bch2_fs_usage_types[u->entry.btree_id],
- le64_to_cpu(u->v));
+ prt_str(out, "type=");
+ bch2_prt_fs_usage_type(out, u->entry.btree_id);
+ prt_printf(out, " v=%llu", le64_to_cpu(u->v));
}
static int journal_entry_data_usage_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
struct jset_entry_data_usage *u =
container_of(entry, struct jset_entry_data_usage, entry);
@@ -616,7 +644,7 @@ static int journal_entry_clock_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
struct jset_entry_clock *clock =
container_of(entry, struct jset_entry_clock, entry);
@@ -656,13 +684,12 @@ static int journal_entry_dev_usage_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
struct jset_entry_dev_usage *u =
container_of(entry, struct jset_entry_dev_usage, entry);
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
unsigned expected = sizeof(*u);
- unsigned dev;
int ret = 0;
if (journal_entry_err_on(bytes < expected,
@@ -674,16 +701,6 @@ static int journal_entry_dev_usage_validate(struct bch_fs *c,
return ret;
}
- dev = le32_to_cpu(u->dev);
-
- if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
- c, version, jset, entry,
- journal_entry_dev_usage_bad_dev,
- "bad dev")) {
- journal_entry_null_range(entry, vstruct_next(entry));
- return ret;
- }
-
if (journal_entry_err_on(u->pad,
c, version, jset, entry,
journal_entry_dev_usage_bad_pad,
@@ -718,7 +735,7 @@ static int journal_entry_log_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
return 0;
}
@@ -736,7 +753,7 @@ static int journal_entry_overwrite_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
return journal_entry_btree_keys_validate(c, jset, entry,
version, big_endian, READ);
@@ -752,7 +769,7 @@ static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
return journal_entry_btree_keys_validate(c, jset, entry,
version, big_endian, READ);
@@ -768,7 +785,7 @@ static int journal_entry_datetime_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
unsigned bytes = vstruct_bytes(entry);
unsigned expected = 16;
@@ -798,7 +815,7 @@ static void journal_entry_datetime_to_text(struct printbuf *out, struct bch_fs *
struct jset_entry_ops {
int (*validate)(struct bch_fs *, struct jset *,
struct jset_entry *, unsigned, int,
- enum bkey_invalid_flags);
+ enum bch_validate_flags);
void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};
@@ -816,7 +833,7 @@ int bch2_journal_entry_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
unsigned version, int big_endian,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
return entry->type < BCH_JSET_ENTRY_NR
? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
@@ -827,16 +844,16 @@ int bch2_journal_entry_validate(struct bch_fs *c,
void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
struct jset_entry *entry)
{
+ bch2_prt_jset_entry_type(out, entry->type);
+
if (entry->type < BCH_JSET_ENTRY_NR) {
- prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+ prt_str(out, ": ");
bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
- } else {
- prt_printf(out, "(unknown type %u)", entry->type);
}
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
unsigned version = le32_to_cpu(jset->version);
int ret = 0;
@@ -862,7 +879,7 @@ fsck_err:
static int jset_validate(struct bch_fs *c,
struct bch_dev *ca,
struct jset *jset, u64 sector,
- enum bkey_invalid_flags flags)
+ enum bch_validate_flags flags)
{
unsigned version;
int ret = 0;
@@ -917,7 +934,7 @@ static int jset_validate_early(struct bch_fs *c,
{
size_t bytes = vstruct_bytes(jset);
unsigned version;
- enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
+ enum bch_validate_flags flags = BCH_VALIDATE_journal;
int ret = 0;
if (le64_to_cpu(jset->magic) != jset_magic(c))
@@ -1056,6 +1073,13 @@ reread:
goto err;
}
+ if (le64_to_cpu(j->seq) > ja->highest_seq_found) {
+ ja->highest_seq_found = le64_to_cpu(j->seq);
+ ja->cur_idx = bucket;
+ ja->sectors_free = ca->mi.bucket_size -
+ bucket_remainder(ca, offset) - sectors;
+ }
+
/*
* This happens sometimes if we don't have discards on -
* when we've partially overwritten a bucket with new
@@ -1124,8 +1148,6 @@ static CLOSURE_CALLBACK(bch2_journal_read_device)
struct bch_fs *c = ca->fs;
struct journal_list *jlist =
container_of(cl->parent, struct journal_list, cl);
- struct journal_replay *r, **_r;
- struct genradix_iter iter;
struct journal_read_buf buf = { NULL, 0 };
unsigned i;
int ret = 0;
@@ -1145,47 +1167,6 @@ static CLOSURE_CALLBACK(bch2_journal_read_device)
goto err;
}
- ja->sectors_free = ca->mi.bucket_size;
-
- mutex_lock(&jlist->lock);
- genradix_for_each_reverse(&c->journal_entries, iter, _r) {
- r = *_r;
-
- if (!r)
- continue;
-
- darray_for_each(r->ptrs, i)
- if (i->dev == ca->dev_idx) {
- unsigned wrote = bucket_remainder(ca, i->sector) +
- vstruct_sectors(&r->j, c->block_bits);
-
- ja->cur_idx = i->bucket;
- ja->sectors_free = ca->mi.bucket_size - wrote;
- goto found;
- }
- }
-found:
- mutex_unlock(&jlist->lock);
-
- if (ja->bucket_seq[ja->cur_idx] &&
- ja->sectors_free == ca->mi.bucket_size) {
-#if 0
- /*
- * Debug code for ZNS support, where we (probably) want to be
- * correlated where we stopped in the journal to the zone write
- * points:
- */
- bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
- bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
- for (i = 0; i < 3; i++) {
- unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
-
- bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
- }
-#endif
- ja->sectors_free = 0;
- }
-
/*
* Set dirty_idx to indicate the entire journal is full and needs to be
* reclaimed - journal reclaim will immediately reclaim whatever isn't
@@ -1254,7 +1235,7 @@ int bch2_journal_read(struct bch_fs *c,
* those entries will be blacklisted:
*/
genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
- enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
+ enum bch_validate_flags flags = BCH_VALIDATE_journal;
i = *_i;
@@ -1365,7 +1346,7 @@ int bch2_journal_read(struct bch_fs *c,
fsck_err(c, journal_entries_missing,
"journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
" prev at %s\n"
- " next at %s",
+ " next at %s, continue?",
missing_start, missing_end,
*last_seq, *blacklist_seq - 1,
buf1.buf, buf2.buf);
@@ -1389,7 +1370,7 @@ int bch2_journal_read(struct bch_fs *c,
continue;
darray_for_each(i->ptrs, ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
if (!ptr->csum_good)
bch_err_dev_offset(ca, ptr->sector,
@@ -1399,7 +1380,7 @@ int bch2_journal_read(struct bch_fs *c,
}
ret = jset_validate(c,
- bch_dev_bkey_exists(c, i->ptrs.data[0].dev),
+ bch2_dev_have_ref(c, i->ptrs.data[0].dev),
&i->j,
i->ptrs.data[0].sector,
READ);
@@ -1722,7 +1703,7 @@ static void journal_write_endio(struct bio *bio)
percpu_ref_put(&ca->io_ref);
}
-static CLOSURE_CALLBACK(do_journal_write)
+static CLOSURE_CALLBACK(journal_write_submit)
{
closure_type(w, struct journal_buf, io);
struct journal *j = container_of(w, struct journal, buf[w->idx]);
@@ -1730,10 +1711,8 @@ static CLOSURE_CALLBACK(do_journal_write)
unsigned sectors = vstruct_sectors(w->data, c->block_bits);
extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
- struct journal_device *ja = &ca->journal;
-
- if (!percpu_ref_tryget(&ca->io_ref)) {
+ struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
+ if (!ca) {
/* XXX: fix this */
bch_err(c, "missing device for journal write\n");
continue;
@@ -1742,6 +1721,7 @@ static CLOSURE_CALLBACK(do_journal_write)
this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
sectors);
+ struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio[w->idx]->bio;
bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = ptr->offset;
@@ -1767,6 +1747,44 @@ static CLOSURE_CALLBACK(do_journal_write)
continue_at(cl, journal_write_done, j->wq);
}
+static CLOSURE_CALLBACK(journal_write_preflush)
+{
+ closure_type(w, struct journal_buf, io);
+ struct journal *j = container_of(w, struct journal, buf[w->idx]);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
+ if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
+ spin_lock(&j->lock);
+ closure_wait(&j->async_wait, cl);
+ spin_unlock(&j->lock);
+
+ continue_at(cl, journal_write_preflush, j->wq);
+ return;
+ }
+
+ if (w->separate_flush) {
+ for_each_rw_member(c, ca) {
+ percpu_ref_get(&ca->io_ref);
+
+ struct journal_device *ja = &ca->journal;
+ struct bio *bio = &ja->bio[w->idx]->bio;
+ bio_reset(bio, ca->disk_sb.bdev,
+ REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
+ bio->bi_end_io = journal_write_endio;
+ bio->bi_private = ca;
+ closure_bio_submit(bio, cl);
+ }
+
+ continue_at(cl, journal_write_submit, j->wq);
+ } else {
+ /*
+ * no need to punt to another work item if we're not waiting on
+ * preflushes
+ */
+ journal_write_submit(&cl->work);
+ }
+}
+
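journal_write_preflush shows the closure idiom at work: the stage re-arms itself with continue_at() until its precondition holds (the previous sequence number is durable), then either submits preflush bios or hands off directly to journal_write_submit. A toy continuation-passing model of that shape, with hypothetical names rather than the kernel closure API:

```c
#include <stdio.h>

struct jwrite {
	unsigned long seq, seq_ondisk;
	void (*step)(struct jwrite *);
};

static void write_submit(struct jwrite *w)
{
	printf("submit entry %lu\n", w->seq);
	w->step = NULL;				/* pipeline finished */
}

static void write_preflush(struct jwrite *w)
{
	if (w->seq_ondisk + 1 != w->seq) {	/* prior entry not durable yet */
		printf("waiting, seq_ondisk=%lu\n", w->seq_ondisk);
		w->seq_ondisk++;		/* pretend an IO completed */
		return;				/* stay on this step; re-run */
	}
	printf("preflush for entry %lu\n", w->seq);
	w->step = write_submit;			/* hand off to the next stage */
}

int main(void)
{
	struct jwrite w = { .seq = 5, .seq_ondisk = 3, .step = write_preflush };

	while (w.step)
		w.step(&w);
	return 0;
}
```
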
static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
@@ -1919,14 +1937,14 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *
* So if we're in an error state, and we're still starting up, we don't
* write anything at all.
*/
- if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags))
+ if (error && test_bit(JOURNAL_need_flush_write, &j->flags))
return -EIO;
if (error ||
w->noflush ||
(!w->must_flush &&
(jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
- test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
+ test_bit(JOURNAL_may_skip_flush, &j->flags))) {
w->noflush = true;
SET_JSET_NO_FLUSH(w->data, true);
w->data->last_seq = 0;
@@ -1937,7 +1955,7 @@ static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *
w->must_flush = true;
j->last_flush_write = jiffies;
j->nr_flush_writes++;
- clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
+ clear_bit(JOURNAL_need_flush_write, &j->flags);
}
return 0;
@@ -2032,23 +2050,9 @@ CLOSURE_CALLBACK(bch2_journal_write)
goto err;
if (!JSET_NO_FLUSH(w->data))
- closure_wait_event(&j->async_wait, j->seq_ondisk + 1 == le64_to_cpu(w->data->seq));
-
- if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
- for_each_rw_member(c, ca) {
- percpu_ref_get(&ca->io_ref);
-
- struct journal_device *ja = &ca->journal;
- struct bio *bio = &ja->bio[w->idx]->bio;
- bio_reset(bio, ca->disk_sb.bdev,
- REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
- bio->bi_end_io = journal_write_endio;
- bio->bi_private = ca;
- closure_bio_submit(bio, cl);
- }
- }
-
- continue_at(cl, do_journal_write, j->wq);
+ continue_at(cl, journal_write_preflush, j->wq);
+ else
+ continue_at(cl, journal_write_submit, j->wq);
return;
no_io:
continue_at(cl, journal_write_done, j->wq);
diff --git a/fs/bcachefs/journal_io.h b/fs/bcachefs/journal_io.h
index 4f1e763ab506..2ca9cde30ea8 100644
--- a/fs/bcachefs/journal_io.h
+++ b/fs/bcachefs/journal_io.h
@@ -4,6 +4,9 @@
#include "darray.h"
+void bch2_journal_pos_from_member_info_set(struct bch_fs *);
+void bch2_journal_pos_from_member_info_resume(struct bch_fs *);
+
struct journal_ptr {
bool csum_good;
u8 dev;
@@ -60,7 +63,7 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
struct jset_entry *, unsigned, int,
- enum bkey_invalid_flags);
+ enum bch_validate_flags);
void bch2_journal_entry_to_text(struct printbuf *, struct bch_fs *,
struct jset_entry *);
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index ab811c0dad26..79be0eaddfa0 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -67,6 +67,8 @@ void bch2_journal_set_watermark(struct journal *j)
track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
trace_and_count(c, journal_full, c);
+ mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);
+
swap(watermark, j->watermark);
if (watermark > j->watermark)
journal_wake(j);
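
mod_bit() is a small bcachefs helper, presumably "set the bit if the condition is true, clear it otherwise", which collapses the usual if/else around set_bit()/clear_bit() into one call. A userspace equivalent, assuming the non-atomic case:

```c
#include <stdbool.h>

/* Set or clear bit nr of *addr depending on v (non-atomic sketch; the
 * kernel helper wraps set_bit()/clear_bit() on the flags word). */
static inline void mod_bit(unsigned nr, unsigned long *addr, bool v)
{
	if (v)
		*addr |= 1UL << nr;
	else
		*addr &= ~(1UL << nr);
}

int main(void)
{
	unsigned long flags = 0;

	mod_bit(2, &flags, true);	/* flags == 0x4 */
	mod_bit(2, &flags, false);	/* flags == 0x0 */
	return (int) flags;
}
```
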
@@ -223,9 +225,9 @@ void bch2_journal_space_available(struct journal *j)
j->space[journal_space_clean_ondisk].total) &&
(clean - clean_ondisk <= total / 8) &&
(clean_ondisk * 2 > clean))
- set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+ set_bit(JOURNAL_may_skip_flush, &j->flags);
else
- clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+ clear_bit(JOURNAL_may_skip_flush, &j->flags);
bch2_journal_set_watermark(j);
out:
@@ -816,7 +818,7 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush,
* If journal replay hasn't completed, the unreplayed journal entries
* hold refs on their corresponding sequence numbers
*/
- ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
+ ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
journal_last_seq(j) > seq_to_flush ||
!fifo_used(&j->pin);
@@ -831,7 +833,7 @@ bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
/* time_stats this */
bool did_work = false;
- if (!test_bit(JOURNAL_STARTED, &j->flags))
+ if (!test_bit(JOURNAL_running, &j->flags))
return false;
closure_wait_event(&j->async_wait,
diff --git a/fs/bcachefs/journal_sb.c b/fs/bcachefs/journal_sb.c
index ae4fb8c3a2bc..db80e506e3ab 100644
--- a/fs/bcachefs/journal_sb.c
+++ b/fs/bcachefs/journal_sb.c
@@ -16,9 +16,8 @@ static int u64_cmp(const void *_l, const void *_r)
return cmp_int(*l, *r);
}
-static int bch2_sb_journal_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_journal_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_journal *journal = field_to_type(f, journal);
struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
@@ -99,9 +98,8 @@ static int u64_range_cmp(const void *_l, const void *_r)
return cmp_int(l->start, r->start);
}
-static int bch2_sb_journal_v2_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_journal_v2_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_journal_v2 *journal = field_to_type(f, journal_v2);
struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
diff --git a/fs/bcachefs/journal_seq_blacklist.c b/fs/bcachefs/journal_seq_blacklist.c
index b5303874fc35..ed4846709611 100644
--- a/fs/bcachefs/journal_seq_blacklist.c
+++ b/fs/bcachefs/journal_seq_blacklist.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
-#include "btree_iter.h"
#include "eytzinger.h"
+#include "journal.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"
@@ -95,8 +95,7 @@ out:
return ret ?: bch2_blacklist_table_initialize(c);
}
-static int journal_seq_blacklist_table_cmp(const void *_l,
- const void *_r, size_t size)
+static int journal_seq_blacklist_table_cmp(const void *_l, const void *_r)
{
const struct journal_seq_blacklist_table_entry *l = _l;
const struct journal_seq_blacklist_table_entry *r = _r;
@@ -163,9 +162,8 @@ int bch2_blacklist_table_initialize(struct bch_fs *c)
return 0;
}
-static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_journal_seq_blacklist *bl =
field_to_type(f, journal_seq_blacklist);
@@ -218,78 +216,40 @@ const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
.to_text = bch2_sb_journal_seq_blacklist_to_text
};
-void bch2_blacklist_entries_gc(struct work_struct *work)
+bool bch2_blacklist_entries_gc(struct bch_fs *c)
{
- struct bch_fs *c = container_of(work, struct bch_fs,
- journal_seq_blacklist_gc_work);
- struct journal_seq_blacklist_table *t;
- struct bch_sb_field_journal_seq_blacklist *bl;
struct journal_seq_blacklist_entry *src, *dst;
- struct btree_trans *trans = bch2_trans_get(c);
- unsigned i, nr, new_nr;
- int ret;
-
- for (i = 0; i < BTREE_ID_NR; i++) {
- struct btree_iter iter;
- struct btree *b;
-
- bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
- 0, 0, BTREE_ITER_PREFETCH);
-retry:
- bch2_trans_begin(trans);
-
- b = bch2_btree_iter_peek_node(&iter);
-
- while (!(ret = PTR_ERR_OR_ZERO(b)) &&
- b &&
- !test_bit(BCH_FS_stopping, &c->flags))
- b = bch2_btree_iter_next_node(&iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
- bch2_trans_iter_exit(trans, &iter);
- }
-
- bch2_trans_put(trans);
- if (ret)
- return;
-
- mutex_lock(&c->sb_lock);
- bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
+ struct bch_sb_field_journal_seq_blacklist *bl =
+ bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
if (!bl)
- goto out;
+ return false;
- nr = blacklist_nr_entries(bl);
+ unsigned nr = blacklist_nr_entries(bl);
dst = bl->start;
- t = c->journal_seq_blacklist_table;
+ struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
BUG_ON(nr != t->nr);
+ unsigned i;
for (src = bl->start, i = eytzinger0_first(t->nr);
src < bl->start + nr;
src++, i = eytzinger0_next(i, nr)) {
BUG_ON(t->entries[i].start != le64_to_cpu(src->start));
BUG_ON(t->entries[i].end != le64_to_cpu(src->end));
- if (t->entries[i].dirty)
+ if (t->entries[i].dirty || t->entries[i].end >= c->journal.oldest_seq_found_ondisk)
*dst++ = *src;
}
- new_nr = dst - bl->start;
-
- bch_info(c, "nr blacklist entries was %u, now %u", nr, new_nr);
-
- if (new_nr != nr) {
- bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
- new_nr ? sb_blacklist_u64s(new_nr) : 0);
- BUG_ON(new_nr && !bl);
+ unsigned new_nr = dst - bl->start;
+ if (new_nr == nr)
+ return false;
- if (!new_nr)
- c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_journal_seq_blacklist_v3));
+ bch_verbose(c, "nr blacklist entries was %u, now %u", nr, new_nr);
- bch2_write_super(c);
- }
-out:
- mutex_unlock(&c->sb_lock);
+ bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
+ new_nr ? sb_blacklist_u64s(new_nr) : 0);
+ BUG_ON(new_nr && !bl);
+ return true;
}
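
The gc loop above walks the blacklist table in key order via eytzinger0_first()/eytzinger0_next() while stepping linearly through the superblock entries, and the BUG_ONs assert that the two orders agree. In an eytzinger (BFS/heap-order) layout of a sorted array, in-order successors can be computed from indices alone; a simplified standalone sketch with the same 0-based indexing (not the kernel's bit-trick implementation):

```c
#include <stdio.h>

/* Leftmost node of the tree: the in-order first element. */
static unsigned eyt_first(unsigned n)
{
	unsigned i = 0;

	while (2 * i + 1 < n)
		i = 2 * i + 1;
	return i;
}

/* In-order successor of i; returns n when the traversal is done. */
static unsigned eyt_next(unsigned i, unsigned n)
{
	if (2 * i + 2 < n) {			/* leftmost of right subtree */
		i = 2 * i + 2;
		while (2 * i + 1 < n)
			i = 2 * i + 1;
		return i;
	}
	while (i > 0 && (i & 1) == 0)		/* climb while a right child */
		i = (i - 1) / 2;
	return i ? (i - 1) / 2 : n;		/* parent of a left child, or end */
}

int main(void)
{
	/* Sorted values 10..70 stored in eytzinger order: */
	int a[] = { 40, 20, 60, 10, 30, 50, 70 };
	unsigned n = 7;

	for (unsigned i = eyt_first(n); i < n; i = eyt_next(i, n))
		printf("%d ", a[i]);		/* prints 10 20 30 40 50 60 70 */
	printf("\n");
	return 0;
}
```
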
diff --git a/fs/bcachefs/journal_seq_blacklist.h b/fs/bcachefs/journal_seq_blacklist.h
index afb886ec8e25..d47636f96fdc 100644
--- a/fs/bcachefs/journal_seq_blacklist.h
+++ b/fs/bcachefs/journal_seq_blacklist.h
@@ -17,6 +17,6 @@ int bch2_blacklist_table_initialize(struct bch_fs *);
extern const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist;
-void bch2_blacklist_entries_gc(struct work_struct *);
+bool bch2_blacklist_entries_gc(struct bch_fs *);
#endif /* _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H */
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
index 8c053cb64ca5..19183fcf7ad7 100644
--- a/fs/bcachefs/journal_types.h
+++ b/fs/bcachefs/journal_types.h
@@ -129,11 +129,17 @@ enum journal_space_from {
journal_space_nr,
};
+#define JOURNAL_FLAGS() \
+ x(replay_done) \
+ x(running) \
+ x(may_skip_flush) \
+ x(need_flush_write) \
+ x(space_low)
+
enum journal_flags {
- JOURNAL_REPLAY_DONE,
- JOURNAL_STARTED,
- JOURNAL_MAY_SKIP_FLUSH,
- JOURNAL_NEED_FLUSH_WRITE,
+#define x(n) JOURNAL_##n,
+ JOURNAL_FLAGS()
+#undef x
};
/* Reasons we may fail to get a journal reservation: */
@@ -228,6 +234,7 @@ struct journal {
u64 last_seq_ondisk;
u64 err_seq;
u64 last_empty_seq;
+ u64 oldest_seq_found_ondisk;
/*
* FIFO of journal entries whose btree updates have not yet been
@@ -325,6 +332,7 @@ struct journal_device {
/* for bch_journal_read_device */
struct closure read;
+ u64 highest_seq_found;
};
/*
diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c
index 9fac838d123e..f49fdca1d07d 100644
--- a/fs/bcachefs/logged_ops.c
+++ b/fs/bcachefs/logged_ops.c
@@ -37,7 +37,6 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_logged_op_fn *fn = logged_op_fn(k.k->type);
struct bkey_buf sk;
u32 restart_count = trans->restart_count;
- int ret;
if (!fn)
return 0;
@@ -45,11 +44,11 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
bch2_bkey_buf_init(&sk);
bch2_bkey_buf_reassemble(&sk, c, k);
- ret = drop_locks_do(trans, (bch2_fs_lazy_rw(c), 0)) ?:
- fn->resume(trans, sk.k) ?: trans_was_restarted(trans, restart_count);
+ fn->resume(trans, sk.k);
bch2_bkey_buf_exit(&sk, c);
- return ret;
+
+ return trans_was_restarted(trans, restart_count);
}
int bch2_resume_logged_ops(struct bch_fs *c)
@@ -57,7 +56,7 @@ int bch2_resume_logged_ops(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter,
BTREE_ID_logged_ops, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
resume_logged_op(trans, &iter, k)));
bch_err_fn(c, ret);
return ret;
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
index 26569043e368..a40d116224ed 100644
--- a/fs/bcachefs/lru.c
+++ b/fs/bcachefs/lru.c
@@ -11,7 +11,7 @@
/* KEY_TYPE_lru is obsolete: */
int bch2_lru_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
int ret = 0;
@@ -149,7 +149,7 @@ int bch2_check_lrus(struct bch_fs *c)
struct bpos last_flushed_pos = POS_MIN;
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
- BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
+ BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw,
bch2_check_lru_key(trans, &iter, k, &last_flushed_pos)));
bch_err_fn(c, ret);
diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h
index 429dca816df5..fb11ab0dd00e 100644
--- a/fs/bcachefs/lru.h
+++ b/fs/bcachefs/lru.h
@@ -49,7 +49,7 @@ static inline enum bch_lru_type lru_type(struct bkey_s_c l)
}
int bch2_lru_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_lru_pos_to_text(struct printbuf *, struct bpos);
diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c
index db63b3f3b338..4c298e74723d 100644
--- a/fs/bcachefs/mean_and_variance_test.c
+++ b/fs/bcachefs/mean_and_variance_test.c
@@ -136,20 +136,8 @@ static void mean_and_variance_test_1(struct kunit *test)
d, mean, stddev, weighted_mean, weighted_stddev);
}
-static void mean_and_variance_test_2(struct kunit *test)
-{
- s64 d[] = { 100, 10, 10, 10, 10, 10, 10 };
- s64 mean[] = { 10, 10, 10, 10, 10, 10, 10 };
- s64 stddev[] = { 9, 9, 9, 9, 9, 9, 9 };
- s64 weighted_mean[] = { 32, 27, 22, 19, 17, 15, 14 };
- s64 weighted_stddev[] = { 38, 35, 31, 27, 24, 21, 18 };
-
- do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
- d, mean, stddev, weighted_mean, weighted_stddev);
-}
-
/* Test behaviour where we switch from one steady state to another: */
-static void mean_and_variance_test_3(struct kunit *test)
+static void mean_and_variance_test_2(struct kunit *test)
{
s64 d[] = { 100, 100, 100, 100, 100 };
s64 mean[] = { 22, 32, 40, 46, 50 };
@@ -161,18 +149,6 @@ static void mean_and_variance_test_3(struct kunit *test)
d, mean, stddev, weighted_mean, weighted_stddev);
}
-static void mean_and_variance_test_4(struct kunit *test)
-{
- s64 d[] = { 100, 100, 100, 100, 100 };
- s64 mean[] = { 10, 11, 12, 13, 14 };
- s64 stddev[] = { 9, 13, 15, 17, 19 };
- s64 weighted_mean[] = { 32, 49, 61, 71, 78 };
- s64 weighted_stddev[] = { 38, 44, 44, 41, 38 };
-
- do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2,
- d, mean, stddev, weighted_mean, weighted_stddev);
-}
-
static void mean_and_variance_fast_divpow2(struct kunit *test)
{
s64 i;
@@ -230,8 +206,6 @@ static struct kunit_case mean_and_variance_test_cases[] = {
KUNIT_CASE(mean_and_variance_weighted_advanced_test),
KUNIT_CASE(mean_and_variance_test_1),
KUNIT_CASE(mean_and_variance_test_2),
- KUNIT_CASE(mean_and_variance_test_3),
- KUNIT_CASE(mean_and_variance_test_4),
{}
};
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index 69098eeb5d48..ddc187fb693d 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -49,7 +49,7 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
if (!bch2_bkey_has_device_c(k, dev_idx))
return 0;
- n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_internal_snapshot_node);
ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
@@ -67,7 +67,7 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
/*
* Since we're not inserting through an extent iterator
- * (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
+ * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
* we aren't using the extent overwrite path to delete, we're
* just using the normal key deletion path:
*/
@@ -87,7 +87,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
continue;
ret = for_each_btree_key_commit(trans, iter, id, POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags));
if (ret)
@@ -119,7 +119,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
for (id = 0; id < BTREE_ID_NR; id++) {
bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
- BTREE_ITER_PREFETCH);
+ BTREE_ITER_prefetch);
retry:
ret = 0;
while (bch2_trans_begin(trans),
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index bf68ea49447b..8171f947fac8 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -41,28 +41,23 @@ static void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c
struct data_update_opts *data_opts)
{
printbuf_tabstop_push(out, 20);
- prt_str(out, "rewrite ptrs:");
- prt_tab(out);
+ prt_str(out, "rewrite ptrs:\t");
bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
prt_newline(out);
- prt_str(out, "kill ptrs: ");
- prt_tab(out);
+ prt_str(out, "kill ptrs:\t");
bch2_prt_u64_base2(out, data_opts->kill_ptrs);
prt_newline(out);
- prt_str(out, "target: ");
- prt_tab(out);
+ prt_str(out, "target:\t");
bch2_target_to_text(out, c, data_opts->target);
prt_newline(out);
- prt_str(out, "compression: ");
- prt_tab(out);
+ prt_str(out, "compression:\t");
bch2_compression_opt_to_text(out, background_compression(*io_opts));
prt_newline(out);
- prt_str(out, "extra replicas: ");
- prt_tab(out);
+ prt_str(out, "extra replicas:\t");
prt_u64(out, data_opts->extra_replicas);
}
@@ -421,7 +416,7 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
io_opts->d.nr = 0;
ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
- BTREE_ITER_ALL_SNAPSHOTS, k, ({
+ BTREE_ITER_all_snapshots, k, ({
if (k.k->p.offset != extent_k.k->p.inode)
break;
@@ -467,7 +462,7 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans,
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
- BTREE_ITER_CACHED);
+ BTREE_ITER_cached);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
return ret;
@@ -553,8 +548,8 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
}
bch2_trans_iter_init(trans, &iter, btree_id, start,
- BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_prefetch|
+ BTREE_ITER_all_snapshots);
if (ctxt->rate)
bch2_ratelimit_reset(ctxt->rate);
@@ -695,6 +690,10 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
struct bpos bp_pos = POS_MIN;
int ret = 0;
+ struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode);
+ if (!ca)
+ return 0;
+
trace_bucket_evacuate(c, &bucket);
bch2_bkey_buf_init(&sk);
@@ -705,7 +704,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
bch2_trans_begin(trans);
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
- bucket, BTREE_ITER_CACHED);
+ bucket, BTREE_ITER_cached);
ret = lockrestart_do(trans,
bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
bch2_trans_iter_exit(trans, &iter);
@@ -716,7 +715,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
a = bch2_alloc_to_v4(k, &a_convert);
dirty_sectors = bch2_bucket_sectors_dirty(*a);
- bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
+ bucket_size = ca->mi.bucket_size;
fragmentation = a->fragmentation_lru;
ret = bch2_btree_write_buffer_tryflush(trans);
@@ -730,9 +729,9 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
bch2_trans_begin(trans);
- ret = bch2_get_next_backpointer(trans, bucket, gen,
+ ret = bch2_get_next_backpointer(trans, ca, bucket, gen,
&bp_pos, &bp,
- BTREE_ITER_CACHED);
+ BTREE_ITER_cached);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
@@ -828,6 +827,7 @@ next:
trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret);
err:
+ bch2_dev_put(ca);
bch2_bkey_buf_exit(&sk, c);
return ret;
}
@@ -868,7 +868,7 @@ static int bch2_move_btree(struct bch_fs *c,
continue;
bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, 0,
- BTREE_ITER_PREFETCH);
+ BTREE_ITER_prefetch);
retry:
ret = 0;
while (bch2_trans_begin(trans),
@@ -968,27 +968,17 @@ static bool migrate_btree_pred(struct bch_fs *c, void *arg,
return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
}
+/*
+ * Ancient versions of bcachefs produced packed formats which could represent
+ * keys that the in memory format cannot represent; this checks for those
+ * formats so we can get rid of them.
+ */
static bool bformat_needs_redo(struct bkey_format *f)
{
- unsigned i;
-
- for (i = 0; i < f->nr_fields; i++) {
- unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
- u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
- u64 field_offset = le64_to_cpu(f->field_offset[i]);
-
- if (f->bits_per_field[i] > unpacked_bits)
+ for (unsigned i = 0; i < f->nr_fields; i++)
+ if (bch2_bkey_format_field_overflows(f, i))
return true;
- if ((f->bits_per_field[i] == unpacked_bits) && field_offset)
- return true;
-
- if (((field_offset + ((1ULL << f->bits_per_field[i]) - 1)) &
- unpacked_mask) <
- field_offset)
- return true;
- }
-
return false;
}
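
The open-coded checks removed here now live in bch2_bkey_format_field_overflows(): a packed field overflows the unpacked (in-memory) representation if it carries more bits, if it has equal bits plus a nonzero offset, or if offset plus the maximum packed value wraps past the unpacked range. A hypothetical standalone reconstruction of that predicate (field widths assumed to be 1-64 bits):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does a packed field (packed_bits wide, biased by field_offset) overflow
 * an unpacked field of unpacked_bits? Hypothetical sketch of the checks
 * this hunk removes; widths must be in 1..64. */
static bool field_overflows(unsigned packed_bits, unsigned unpacked_bits,
			    uint64_t field_offset)
{
	uint64_t unpacked_max = ~0ULL >> (64 - unpacked_bits);
	uint64_t packed_max   = ~0ULL >> (64 - packed_bits);

	return packed_bits > unpacked_bits ||
	       (packed_bits == unpacked_bits && field_offset) ||
	       packed_max + field_offset < packed_max ||	/* u64 wraparound */
	       packed_max + field_offset > unpacked_max;
}

int main(void)
{
	printf("%d\n", field_overflows(32, 40, 0));	/* 0: fits comfortably */
	printf("%d\n", field_overflows(40, 40, 1));	/* 1: equal width + offset */
	return 0;
}
```
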
@@ -1043,6 +1033,7 @@ static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
struct extent_ptr_decoded p;
unsigned i = 0;
+ rcu_read_lock();
bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) {
unsigned d = bch2_extent_ptr_durability(c, &p);
@@ -1053,6 +1044,7 @@ static bool drop_extra_replicas_pred(struct bch_fs *c, void *arg,
i++;
}
+ rcu_read_unlock();
return data_opts->kill_ptrs != 0;
}
@@ -1137,23 +1129,17 @@ void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
prt_newline(out);
printbuf_indent_add(out, 2);
- prt_str(out, "keys moved: ");
- prt_u64(out, atomic64_read(&stats->keys_moved));
- prt_newline(out);
-
- prt_str(out, "keys raced: ");
- prt_u64(out, atomic64_read(&stats->keys_raced));
- prt_newline(out);
-
- prt_str(out, "bytes seen: ");
+ prt_printf(out, "keys moved: %llu\n", atomic64_read(&stats->keys_moved));
+ prt_printf(out, "keys raced: %llu\n", atomic64_read(&stats->keys_raced));
+ prt_printf(out, "bytes seen: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
prt_newline(out);
- prt_str(out, "bytes moved: ");
+ prt_printf(out, "bytes moved: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
prt_newline(out);
- prt_str(out, "bytes raced: ");
+ prt_printf(out, "bytes raced: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
prt_newline(out);
@@ -1167,19 +1153,17 @@ static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, str
bch2_move_stats_to_text(out, ctxt->stats);
printbuf_indent_add(out, 2);
- prt_printf(out, "reads: ios %u/%u sectors %u/%u",
+ prt_printf(out, "reads: ios %u/%u sectors %u/%u\n",
atomic_read(&ctxt->read_ios),
c->opts.move_ios_in_flight,
atomic_read(&ctxt->read_sectors),
c->opts.move_bytes_in_flight >> 9);
- prt_newline(out);
- prt_printf(out, "writes: ios %u/%u sectors %u/%u",
+ prt_printf(out, "writes: ios %u/%u sectors %u/%u\n",
atomic_read(&ctxt->write_ios),
c->opts.move_ios_in_flight,
atomic_read(&ctxt->write_sectors),
c->opts.move_bytes_in_flight >> 9);
- prt_newline(out);
printbuf_indent_add(out, 2);
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 0d2b82d8d11f..10bfb31c151b 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -84,7 +84,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
return 0;
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
- b->k.bucket, BTREE_ITER_CACHED);
+ b->k.bucket, BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
return ret;
@@ -158,6 +158,8 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
return ret;
+ bch2_trans_begin(trans);
+
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index 08ea0cfc4aef..bb068fd72465 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -7,6 +7,7 @@
#include "disk_groups.h"
#include "error.h"
#include "opts.h"
+#include "recovery_passes.h"
#include "super-io.h"
#include "util.h"
@@ -42,7 +43,7 @@ const char * const __bch2_btree_ids[] = {
NULL
};
-const char * const bch2_csum_types[] = {
+static const char * const __bch2_csum_types[] = {
BCH_CSUM_TYPES()
NULL
};
@@ -52,7 +53,7 @@ const char * const bch2_csum_opts[] = {
NULL
};
-const char * const __bch2_compression_types[] = {
+static const char * const __bch2_compression_types[] = {
BCH_COMPRESSION_TYPES()
NULL
};
@@ -82,18 +83,39 @@ const char * const bch2_member_states[] = {
NULL
};
-const char * const bch2_jset_entry_types[] = {
+static const char * const __bch2_jset_entry_types[] = {
BCH_JSET_ENTRY_TYPES()
NULL
};
-const char * const bch2_fs_usage_types[] = {
+static const char * const __bch2_fs_usage_types[] = {
BCH_FS_USAGE_TYPES()
NULL
};
#undef x
+static void prt_str_opt_boundscheck(struct printbuf *out, const char * const opts[],
+ unsigned nr, const char *type, unsigned idx)
+{
+ if (idx < nr)
+ prt_str(out, opts[idx]);
+ else
+ prt_printf(out, "(unknown %s %u)", type, idx);
+}
+
+#define PRT_STR_OPT_BOUNDSCHECKED(name, type) \
+void bch2_prt_##name(struct printbuf *out, type t) \
+{ \
+ prt_str_opt_boundscheck(out, __bch2_##name##s, ARRAY_SIZE(__bch2_##name##s) - 1, #name, t);\
+}
+
+PRT_STR_OPT_BOUNDSCHECKED(jset_entry_type, enum bch_jset_entry_type);
+PRT_STR_OPT_BOUNDSCHECKED(fs_usage_type, enum bch_fs_usage_type);
+PRT_STR_OPT_BOUNDSCHECKED(data_type, enum bch_data_type);
+PRT_STR_OPT_BOUNDSCHECKED(csum_type, enum bch_csum_type);
+PRT_STR_OPT_BOUNDSCHECKED(compression_type, enum bch_compression_type);
+
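The string tables above are NULL-terminated, so ARRAY_SIZE() - 1 is the number of real entries, and the macro stamps out one bounds-checked printer per table, replacing direct array indexing that could read past the end on a corrupt or future enum value. The same shape in standalone form (illustrative names):

```c
#include <stdio.h>

static const char * const fruit_strs[] = { "apple", "pear", NULL };

/* Print strs[idx] if in range, else a diagnostic instead of reading
 * past the end of the table. */
static void prt_str_bounded(const char * const strs[], unsigned nr,
			    const char *type, unsigned idx)
{
	if (idx < nr)
		fputs(strs[idx], stdout);
	else
		printf("(unknown %s %u)", type, idx);
}

#define DEFINE_BOUNDED_PRT(name)					\
static void prt_##name(unsigned idx)					\
{									\
	prt_str_bounded(name##_strs,					\
			sizeof(name##_strs) / sizeof(name##_strs[0]) - 1,\
			#name, idx);					\
}

DEFINE_BOUNDED_PRT(fruit)

int main(void)
{
	prt_fruit(1);	putchar('\n');	/* pear */
	prt_fruit(9);	putchar('\n');	/* (unknown fruit 9) */
	return 0;
}
```
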
static int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res,
struct printbuf *err)
{
@@ -205,6 +227,9 @@ const struct bch_option bch2_opt_table[] = {
#define OPT_STR(_choices) .type = BCH_OPT_STR, \
.min = 0, .max = ARRAY_SIZE(_choices), \
.choices = _choices
+#define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \
+ .min = 0, .max = U64_MAX, \
+ .choices = _choices
#define OPT_FN(_fn) .type = BCH_OPT_FN, .fn = _fn
#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 136083c11f3a..25530e0bb2f3 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -16,18 +16,20 @@ extern const char * const bch2_version_upgrade_opts[];
extern const char * const bch2_sb_features[];
extern const char * const bch2_sb_compat[];
extern const char * const __bch2_btree_ids[];
-extern const char * const bch2_csum_types[];
extern const char * const bch2_csum_opts[];
-extern const char * const __bch2_compression_types[];
extern const char * const bch2_compression_opts[];
extern const char * const bch2_str_hash_types[];
extern const char * const bch2_str_hash_opts[];
extern const char * const __bch2_data_types[];
extern const char * const bch2_member_states[];
-extern const char * const bch2_jset_entry_types[];
-extern const char * const bch2_fs_usage_types[];
extern const char * const bch2_d_types[];
+void bch2_prt_jset_entry_type(struct printbuf *, enum bch_jset_entry_type);
+void bch2_prt_fs_usage_type(struct printbuf *, enum bch_fs_usage_type);
+void bch2_prt_data_type(struct printbuf *, enum bch_data_type);
+void bch2_prt_csum_type(struct printbuf *, enum bch_csum_type);
+void bch2_prt_compression_type(struct printbuf *, enum bch_compression_type);
+
static inline const char *bch2_d_type_str(unsigned d_type)
{
return (d_type < BCH_DT_MAX ? bch2_d_types[d_type] : NULL) ?: "(bad d_type)";
@@ -362,12 +364,17 @@ enum fsck_err_opts {
OPT_FS|OPT_MOUNT, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
- NULL, "Don't replay the journal") \
- x(keep_journal, u8, \
+ NULL, "Exit recovery immediately prior to journal replay")\
+ x(recovery_pass_last, u8, \
+ OPT_FS|OPT_MOUNT, \
+ OPT_STR_NOLIMIT(bch2_recovery_passes), \
+ BCH2_NO_SB_OPT, 0, \
+ NULL, "Exit recovery after specified pass") \
+ x(retain_recovery_info, u8, \
0, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
- NULL, "Don't free journal entries/keys after startup")\
+ NULL, "Don't free journal entries/keys, scanned btree nodes after startup")\
x(read_entire_journal, u8, \
0, \
OPT_BOOL(), \
@@ -419,11 +426,6 @@ enum fsck_err_opts {
BCH_SB_VERSION_UPGRADE, BCH_VERSION_UPGRADE_compatible, \
NULL, "Set superblock to latest version,\n" \
"allowing any new features to be used") \
- x(buckets_nouse, u8, \
- 0, \
- OPT_BOOL(), \
- BCH2_NO_SB_OPT, false, \
- NULL, "Allocate the buckets_nouse bitmap") \
x(stdio, u64, \
0, \
OPT_UINT(0, S64_MAX), \
@@ -473,7 +475,7 @@ enum fsck_err_opts {
OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, true, \
- NULL, "BTREE_ITER_PREFETCH casuse btree nodes to be\n"\
+ NULL, "BTREE_ITER_prefetch casuse btree nodes to be\n"\
" prefetched sequentially")
struct bch_opts {
diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c
index b27d22925929..8b0369185f5c 100644
--- a/fs/bcachefs/printbuf.c
+++ b/fs/bcachefs/printbuf.c
@@ -10,35 +10,50 @@
#include "printbuf.h"
-static inline unsigned printbuf_linelen(struct printbuf *buf)
+static inline unsigned __printbuf_linelen(struct printbuf *buf, unsigned pos)
{
- return buf->pos - buf->last_newline;
+ return pos - buf->last_newline;
}
-int bch2_printbuf_make_room(struct printbuf *out, unsigned extra)
+static inline unsigned printbuf_linelen(struct printbuf *buf)
{
- unsigned new_size;
- char *buf;
+ return __printbuf_linelen(buf, buf->pos);
+}
- if (!out->heap_allocated)
- return 0;
+/*
+ * Returns spaces from start of line, if set, or 0 if unset:
+ */
+static inline unsigned cur_tabstop(struct printbuf *buf)
+{
+ return buf->cur_tabstop < buf->nr_tabstops
+ ? buf->_tabstops[buf->cur_tabstop]
+ : 0;
+}
+int bch2_printbuf_make_room(struct printbuf *out, unsigned extra)
+{
/* Reserved space for terminating nul: */
extra += 1;
- if (out->pos + extra < out->size)
+ if (out->pos + extra <= out->size)
return 0;
- new_size = roundup_pow_of_two(out->size + extra);
+ if (!out->heap_allocated) {
+ out->overflow = true;
+ return 0;
+ }
+
+ unsigned new_size = roundup_pow_of_two(out->size + extra);
/*
* Note: output buffer must be freeable with kfree(), it's not required
* that the user use printbuf_exit().
*/
- buf = krealloc(out->buf, new_size, !out->atomic ? GFP_KERNEL : GFP_NOWAIT);
+ char *buf = krealloc(out->buf, new_size, !out->atomic ? GFP_KERNEL : GFP_NOWAIT);
if (!buf) {
out->allocation_failure = true;
+ out->overflow = true;
return -ENOMEM;
}
@@ -47,6 +62,92 @@ int bch2_printbuf_make_room(struct printbuf *out, unsigned extra)
return 0;
}
+static void printbuf_advance_pos(struct printbuf *out, unsigned len)
+{
+ out->pos += min(len, printbuf_remaining(out));
+}
+
+static void printbuf_insert_spaces(struct printbuf *out, unsigned pos, unsigned nr)
+{
+ unsigned move = out->pos - pos;
+
+ bch2_printbuf_make_room(out, nr);
+
+ if (pos + nr < out->size)
+ memmove(out->buf + pos + nr,
+ out->buf + pos,
+ min(move, out->size - 1 - pos - nr));
+
+ if (pos < out->size)
+ memset(out->buf + pos, ' ', min(nr, out->size - pos));
+
+ printbuf_advance_pos(out, nr);
+ printbuf_nul_terminate_reserved(out);
+}
+
+static void __printbuf_do_indent(struct printbuf *out, unsigned pos)
+{
+ while (true) {
+ int pad;
+ unsigned len = out->pos - pos;
+ char *p = out->buf + pos;
+ char *n = memscan(p, '\n', len);
+ if (cur_tabstop(out)) {
+ n = min(n, (char *) memscan(p, '\r', len));
+ n = min(n, (char *) memscan(p, '\t', len));
+ }
+
+ pos = n - out->buf;
+ if (pos == out->pos)
+ break;
+
+ switch (*n) {
+ case '\n':
+ pos++;
+ out->last_newline = pos;
+
+ printbuf_insert_spaces(out, pos, out->indent);
+
+ pos = min(pos + out->indent, out->pos);
+ out->last_field = pos;
+ out->cur_tabstop = 0;
+ break;
+ case '\r':
+ memmove(n, n + 1, out->pos - pos);
+ --out->pos;
+ pad = (int) cur_tabstop(out) - (int) __printbuf_linelen(out, pos);
+ if (pad > 0) {
+ printbuf_insert_spaces(out, out->last_field, pad);
+ pos += pad;
+ }
+
+ out->last_field = pos;
+ out->cur_tabstop++;
+ break;
+ case '\t':
+ pad = (int) cur_tabstop(out) - (int) __printbuf_linelen(out, pos) - 1;
+ if (pad > 0) {
+ *n = ' ';
+ printbuf_insert_spaces(out, pos, pad - 1);
+ pos += pad;
+ } else {
+ memmove(n, n + 1, out->pos - pos);
+ --out->pos;
+ }
+
+ out->last_field = pos;
+ out->cur_tabstop++;
+ break;
+ }
+ }
+}
+
+static inline void printbuf_do_indent(struct printbuf *out, unsigned pos)
+{
+ if (out->has_indent_or_tabstops && !out->suppress_indent_tabstop_handling)
+ __printbuf_do_indent(out, pos);
+}
+
void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list args)
{
int len;
@@ -55,14 +156,14 @@ void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list args)
va_list args2;
va_copy(args2, args);
- len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args2);
+ len = vsnprintf(out->buf + out->pos, printbuf_remaining_size(out), fmt, args2);
va_end(args2);
- } while (len + 1 >= printbuf_remaining(out) &&
- !bch2_printbuf_make_room(out, len + 1));
+ } while (len > printbuf_remaining(out) &&
+ !bch2_printbuf_make_room(out, len));
- len = min_t(size_t, len,
- printbuf_remaining(out) ? printbuf_remaining(out) - 1 : 0);
- out->pos += len;
+ unsigned indent_pos = out->pos;
+ printbuf_advance_pos(out, len);
+ printbuf_do_indent(out, indent_pos);
}
void bch2_prt_printf(struct printbuf *out, const char *fmt, ...)
@@ -72,14 +173,14 @@ void bch2_prt_printf(struct printbuf *out, const char *fmt, ...)
do {
va_start(args, fmt);
- len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args);
+ len = vsnprintf(out->buf + out->pos, printbuf_remaining_size(out), fmt, args);
va_end(args);
- } while (len + 1 >= printbuf_remaining(out) &&
- !bch2_printbuf_make_room(out, len + 1));
+ } while (len > printbuf_remaining(out) &&
+ !bch2_printbuf_make_room(out, len));
- len = min_t(size_t, len,
- printbuf_remaining(out) ? printbuf_remaining(out) - 1 : 0);
- out->pos += len;
+ unsigned indent_pos = out->pos;
+ printbuf_advance_pos(out, len);
+ printbuf_do_indent(out, indent_pos);
}
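
The rewritten loops rely on vsnprintf's return value, which is the length the output would have had (excluding the nul), to decide whether to grow the buffer and retry; the final position is then advanced with clamping via printbuf_advance_pos() instead of an open-coded min(). The classic userspace version of that grow-and-retry pattern:

```c
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct buf { char *s; size_t pos, size; };

/* Append formatted text, growing the buffer whenever vsnprintf reports
 * that the output was truncated. */
static void buf_printf(struct buf *b, const char *fmt, ...)
{
	va_list args;
	int len;

	for (;;) {
		va_start(args, fmt);
		len = vsnprintf(b->s + b->pos, b->size - b->pos, fmt, args);
		va_end(args);

		if (len < 0)
			return;				/* format error */
		if ((size_t) len < b->size - b->pos)
			break;				/* fit, nul included */

		size_t want = b->pos + len + 1, new_size = b->size;
		while (new_size < want)			/* next power of two */
			new_size *= 2;
		char *t = realloc(b->s, new_size);
		if (!t)
			return;				/* allocation failed */
		b->s = t;
		b->size = new_size;
	}
	b->pos += len;
}

int main(void)
{
	struct buf b = { .s = malloc(16), .pos = 0, .size = 16 };

	b.s[0] = 0;
	buf_printf(&b, "hello %s, ", "world");
	buf_printf(&b, "%d dirty journal entries", 128);
	puts(b.s);
	free(b.s);
	return 0;
}
```
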
/**
@@ -194,33 +295,20 @@ void bch2_printbuf_indent_sub(struct printbuf *buf, unsigned spaces)
void bch2_prt_newline(struct printbuf *buf)
{
- unsigned i;
-
bch2_printbuf_make_room(buf, 1 + buf->indent);
- __prt_char(buf, '\n');
+ __prt_char_reserved(buf, '\n');
buf->last_newline = buf->pos;
- for (i = 0; i < buf->indent; i++)
- __prt_char(buf, ' ');
+ __prt_chars_reserved(buf, ' ', buf->indent);
- printbuf_nul_terminate(buf);
+ printbuf_nul_terminate_reserved(buf);
buf->last_field = buf->pos;
buf->cur_tabstop = 0;
}
-/*
- * Returns spaces from start of line, if set, or 0 if unset:
- */
-static inline unsigned cur_tabstop(struct printbuf *buf)
-{
- return buf->cur_tabstop < buf->nr_tabstops
- ? buf->_tabstops[buf->cur_tabstop]
- : 0;
-}
-
static void __prt_tab(struct printbuf *out)
{
int spaces = max_t(int, 0, cur_tabstop(out) - printbuf_linelen(out));
@@ -247,24 +335,9 @@ void bch2_prt_tab(struct printbuf *out)
static void __prt_tab_rjust(struct printbuf *buf)
{
- unsigned move = buf->pos - buf->last_field;
int pad = (int) cur_tabstop(buf) - (int) printbuf_linelen(buf);
-
- if (pad > 0) {
- bch2_printbuf_make_room(buf, pad);
-
- if (buf->last_field + pad < buf->size)
- memmove(buf->buf + buf->last_field + pad,
- buf->buf + buf->last_field,
- min(move, buf->size - 1 - buf->last_field - pad));
-
- if (buf->last_field < buf->size)
- memset(buf->buf + buf->last_field, ' ',
- min((unsigned) pad, buf->size - buf->last_field));
-
- buf->pos += pad;
- printbuf_nul_terminate(buf);
- }
+ if (pad > 0)
+ printbuf_insert_spaces(buf, buf->last_field, pad);
buf->last_field = buf->pos;
buf->cur_tabstop++;
@@ -301,41 +374,9 @@ void bch2_prt_tab_rjust(struct printbuf *buf)
*/
void bch2_prt_bytes_indented(struct printbuf *out, const char *str, unsigned count)
{
- const char *unprinted_start = str;
- const char *end = str + count;
-
- if (!out->has_indent_or_tabstops || out->suppress_indent_tabstop_handling) {
- prt_bytes(out, str, count);
- return;
- }
-
- while (str != end) {
- switch (*str) {
- case '\n':
- prt_bytes(out, unprinted_start, str - unprinted_start);
- unprinted_start = str + 1;
- bch2_prt_newline(out);
- break;
- case '\t':
- if (likely(cur_tabstop(out))) {
- prt_bytes(out, unprinted_start, str - unprinted_start);
- unprinted_start = str + 1;
- __prt_tab(out);
- }
- break;
- case '\r':
- if (likely(cur_tabstop(out))) {
- prt_bytes(out, unprinted_start, str - unprinted_start);
- unprinted_start = str + 1;
- __prt_tab_rjust(out);
- }
- break;
- }
-
- str++;
- }
-
- prt_bytes(out, unprinted_start, str - unprinted_start);
+ unsigned indent_pos = out->pos;
+ prt_bytes(out, str, count);
+ printbuf_do_indent(out, indent_pos);
}
/**
@@ -348,9 +389,10 @@ void bch2_prt_bytes_indented(struct printbuf *out, const char *str, unsigned cou
void bch2_prt_human_readable_u64(struct printbuf *out, u64 v)
{
bch2_printbuf_make_room(out, 10);
- out->pos += string_get_size(v, 1, !out->si_units,
- out->buf + out->pos,
- printbuf_remaining_size(out));
+ unsigned len = string_get_size(v, 1, !out->si_units,
+ out->buf + out->pos,
+ printbuf_remaining_size(out));
+ printbuf_advance_pos(out, len);
}
/**
@@ -402,9 +444,7 @@ void bch2_prt_string_option(struct printbuf *out,
const char * const list[],
size_t selected)
{
- size_t i;
-
- for (i = 0; list[i]; i++)
+ for (size_t i = 0; list[i]; i++)
bch2_prt_printf(out, i == selected ? "[%s] " : "%s ", list[i]);
}
diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h
index 9a4a56c40937..9ecc56bc9635 100644
--- a/fs/bcachefs/printbuf.h
+++ b/fs/bcachefs/printbuf.h
@@ -86,6 +86,7 @@ struct printbuf {
u8 atomic;
bool allocation_failure:1;
bool heap_allocated:1;
+ bool overflow:1;
enum printbuf_si si_units:1;
bool human_readable_units:1;
bool has_indent_or_tabstops:1;
@@ -142,7 +143,9 @@ void bch2_prt_bitflags_vector(struct printbuf *, const char * const[],
*/
static inline unsigned printbuf_remaining_size(struct printbuf *out)
{
- return out->pos < out->size ? out->size - out->pos : 0;
+ if (WARN_ON(out->size && out->pos >= out->size))
+ out->pos = out->size - 1;
+ return out->size - out->pos;
}
/*
@@ -151,7 +154,7 @@ static inline unsigned printbuf_remaining_size(struct printbuf *out)
*/
static inline unsigned printbuf_remaining(struct printbuf *out)
{
- return out->pos < out->size ? out->size - out->pos - 1 : 0;
+ return out->size ? printbuf_remaining_size(out) - 1 : 0;
}
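
After this change the two helpers differ only by the reserved nul byte, and both rest on a single invariant: whenever size is nonzero, pos stays strictly inside the buffer. The WARN_ON clamps any violation instead of letting the unsigned subtraction wrap (truncation is now tracked separately via the new overflow bit). Concretely:

	/*
	 * size == 8, pos == 3:
	 *   printbuf_remaining_size() == 5	bytes usable, including the nul
	 *   printbuf_remaining()      == 4	printable characters
	 * size == 0 (never allocated): both return 0, nothing is written
	 */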
static inline unsigned printbuf_written(struct printbuf *out)
@@ -159,30 +162,25 @@ static inline unsigned printbuf_written(struct printbuf *out)
return out->size ? min(out->pos, out->size - 1) : 0;
}
-/*
- * Returns true if output was truncated:
- */
-static inline bool printbuf_overflowed(struct printbuf *out)
+static inline void printbuf_nul_terminate_reserved(struct printbuf *out)
{
- return out->pos >= out->size;
+ if (WARN_ON(out->size && out->pos >= out->size))
+ out->pos = out->size - 1;
+ if (out->size)
+ out->buf[out->pos] = 0;
}
static inline void printbuf_nul_terminate(struct printbuf *out)
{
bch2_printbuf_make_room(out, 1);
-
- if (out->pos < out->size)
- out->buf[out->pos] = 0;
- else if (out->size)
- out->buf[out->size - 1] = 0;
+ printbuf_nul_terminate_reserved(out);
}
/* Doesn't call bch2_printbuf_make_room(), doesn't nul terminate: */
static inline void __prt_char_reserved(struct printbuf *out, char c)
{
if (printbuf_remaining(out))
- out->buf[out->pos] = c;
- out->pos++;
+ out->buf[out->pos++] = c;
}
/* Doesn't nul terminate: */
@@ -194,37 +192,34 @@ static inline void __prt_char(struct printbuf *out, char c)
static inline void prt_char(struct printbuf *out, char c)
{
- __prt_char(out, c);
- printbuf_nul_terminate(out);
+ bch2_printbuf_make_room(out, 2);
+ __prt_char_reserved(out, c);
+ printbuf_nul_terminate_reserved(out);
}
static inline void __prt_chars_reserved(struct printbuf *out, char c, unsigned n)
{
- unsigned i, can_print = min(n, printbuf_remaining(out));
+ unsigned can_print = min(n, printbuf_remaining(out));
- for (i = 0; i < can_print; i++)
+ for (unsigned i = 0; i < can_print; i++)
out->buf[out->pos++] = c;
- out->pos += n - can_print;
}
static inline void prt_chars(struct printbuf *out, char c, unsigned n)
{
bch2_printbuf_make_room(out, n);
__prt_chars_reserved(out, c, n);
- printbuf_nul_terminate(out);
+ printbuf_nul_terminate_reserved(out);
}
static inline void prt_bytes(struct printbuf *out, const void *b, unsigned n)
{
- unsigned i, can_print;
-
bch2_printbuf_make_room(out, n);
- can_print = min(n, printbuf_remaining(out));
+ unsigned can_print = min(n, printbuf_remaining(out));
- for (i = 0; i < can_print; i++)
+ for (unsigned i = 0; i < can_print; i++)
out->buf[out->pos++] = ((char *) b)[i];
- out->pos += n - can_print;
printbuf_nul_terminate(out);
}
@@ -241,18 +236,18 @@ static inline void prt_str_indented(struct printbuf *out, const char *str)
static inline void prt_hex_byte(struct printbuf *out, u8 byte)
{
- bch2_printbuf_make_room(out, 2);
+ bch2_printbuf_make_room(out, 3);
__prt_char_reserved(out, hex_asc_hi(byte));
__prt_char_reserved(out, hex_asc_lo(byte));
- printbuf_nul_terminate(out);
+ printbuf_nul_terminate_reserved(out);
}
static inline void prt_hex_byte_upper(struct printbuf *out, u8 byte)
{
- bch2_printbuf_make_room(out, 2);
+ bch2_printbuf_make_room(out, 3);
__prt_char_reserved(out, hex_asc_upper_hi(byte));
__prt_char_reserved(out, hex_asc_upper_lo(byte));
- printbuf_nul_terminate(out);
+ printbuf_nul_terminate_reserved(out);
}
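
The reservations grow from 2 to 3 because the _reserved helpers no longer advance pos past the end of the buffer: the caller's single bch2_printbuf_make_room() must now cover every byte the sequence will touch, including the terminating nul. The idiom these two functions follow:

	/* reserve output bytes + 1 for the nul, then use only _reserved helpers */
	bch2_printbuf_make_room(out, 2 + 1);
	__prt_char_reserved(out, 'h');
	__prt_char_reserved(out, 'i');
	printbuf_nul_terminate_reserved(out);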
/**
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index e68b34eab90a..a0cca8b70e0a 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -20,7 +20,7 @@ static const char * const bch2_quota_counters[] = {
};
static int bch2_sb_quota_validate(struct bch_sb *sb, struct bch_sb_field *f,
- struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_quota *q = field_to_type(f, quota);
@@ -60,8 +60,7 @@ const struct bch_sb_field_ops bch_sb_field_ops_quota = {
};
int bch2_quota_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
- struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
int ret = 0;
@@ -97,45 +96,14 @@ static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 20);
- prt_str(out, "i_fieldmask");
- prt_tab(out);
- prt_printf(out, "%x", i->i_fieldmask);
- prt_newline(out);
-
- prt_str(out, "i_flags");
- prt_tab(out);
- prt_printf(out, "%u", i->i_flags);
- prt_newline(out);
-
- prt_str(out, "i_spc_timelimit");
- prt_tab(out);
- prt_printf(out, "%u", i->i_spc_timelimit);
- prt_newline(out);
-
- prt_str(out, "i_ino_timelimit");
- prt_tab(out);
- prt_printf(out, "%u", i->i_ino_timelimit);
- prt_newline(out);
-
- prt_str(out, "i_rt_spc_timelimit");
- prt_tab(out);
- prt_printf(out, "%u", i->i_rt_spc_timelimit);
- prt_newline(out);
-
- prt_str(out, "i_spc_warnlimit");
- prt_tab(out);
- prt_printf(out, "%u", i->i_spc_warnlimit);
- prt_newline(out);
-
- prt_str(out, "i_ino_warnlimit");
- prt_tab(out);
- prt_printf(out, "%u", i->i_ino_warnlimit);
- prt_newline(out);
-
- prt_str(out, "i_rt_spc_warnlimit");
- prt_tab(out);
- prt_printf(out, "%u", i->i_rt_spc_warnlimit);
- prt_newline(out);
+ prt_printf(out, "i_fieldmask\t%x\n", i->i_fieldmask);
+ prt_printf(out, "i_flags\t%u\n", i->i_flags);
+ prt_printf(out, "i_spc_timelimit\t%u\n", i->i_spc_timelimit);
+ prt_printf(out, "i_ino_timelimit\t%u\n", i->i_ino_timelimit);
+ prt_printf(out, "i_rt_spc_timelimit\t%u\n", i->i_rt_spc_timelimit);
+ prt_printf(out, "i_spc_warnlimit\t%u\n", i->i_spc_warnlimit);
+ prt_printf(out, "i_ino_warnlimit\t%u\n", i->i_ino_warnlimit);
+ prt_printf(out, "i_rt_spc_warnlimit\t%u\n", i->i_rt_spc_warnlimit);
}
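
This collapse is the payoff from the printbuf work earlier in the series: with a 20-column tabstop pushed, '\t' in a format string pads the label out to the value column and '\n' starts a fresh line, so each four-call sequence becomes a single prt_printf(). The first field, spelled both ways:

	/* before */
	prt_str(out, "i_fieldmask");
	prt_tab(out);
	prt_printf(out, "%x", i->i_fieldmask);
	prt_newline(out);

	/* after -- identical output */
	prt_printf(out, "i_fieldmask\t%x\n", i->i_fieldmask);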
static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
@@ -143,60 +111,17 @@ static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 20);
- prt_str(out, "d_fieldmask");
- prt_tab(out);
- prt_printf(out, "%x", q->d_fieldmask);
- prt_newline(out);
-
- prt_str(out, "d_spc_hardlimit");
- prt_tab(out);
- prt_printf(out, "%llu", q->d_spc_hardlimit);
- prt_newline(out);
-
- prt_str(out, "d_spc_softlimit");
- prt_tab(out);
- prt_printf(out, "%llu", q->d_spc_softlimit);
- prt_newline(out);
-
- prt_str(out, "d_ino_hardlimit");
- prt_tab(out);
- prt_printf(out, "%llu", q->d_ino_hardlimit);
- prt_newline(out);
-
- prt_str(out, "d_ino_softlimit");
- prt_tab(out);
- prt_printf(out, "%llu", q->d_ino_softlimit);
- prt_newline(out);
-
- prt_str(out, "d_space");
- prt_tab(out);
- prt_printf(out, "%llu", q->d_space);
- prt_newline(out);
-
- prt_str(out, "d_ino_count");
- prt_tab(out);
- prt_printf(out, "%llu", q->d_ino_count);
- prt_newline(out);
-
- prt_str(out, "d_ino_timer");
- prt_tab(out);
- prt_printf(out, "%llu", q->d_ino_timer);
- prt_newline(out);
-
- prt_str(out, "d_spc_timer");
- prt_tab(out);
- prt_printf(out, "%llu", q->d_spc_timer);
- prt_newline(out);
-
- prt_str(out, "d_ino_warns");
- prt_tab(out);
- prt_printf(out, "%i", q->d_ino_warns);
- prt_newline(out);
-
- prt_str(out, "d_spc_warns");
- prt_tab(out);
- prt_printf(out, "%i", q->d_spc_warns);
- prt_newline(out);
+ prt_printf(out, "d_fieldmask\t%x\n", q->d_fieldmask);
+ prt_printf(out, "d_spc_hardlimit\t%llu\n", q->d_spc_hardlimit);
+ prt_printf(out, "d_spc_softlimit\t%llu\n", q->d_spc_softlimit);
+ prt_printf(out, "d_ino_hardlimit\%llu\n", q->d_ino_hardlimit);
+ prt_printf(out, "d_ino_softlimit\t%llu\n", q->d_ino_softlimit);
+ prt_printf(out, "d_space\t%llu\n", q->d_space);
+ prt_printf(out, "d_ino_count\t%llu\n", q->d_ino_count);
+ prt_printf(out, "d_ino_timer\t%llu\n", q->d_ino_timer);
+ prt_printf(out, "d_spc_timer\t%llu\n", q->d_spc_timer);
+ prt_printf(out, "d_ino_warns\t%i\n", q->d_ino_warns);
+ prt_printf(out, "d_spc_warns\t%i\n", q->d_spc_warns);
}
static inline unsigned __next_qtype(unsigned i, unsigned qtypes)
@@ -560,13 +485,11 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct bch_inode_unpacked u;
struct bch_snapshot_tree s_t;
- int ret;
+ u32 tree = bch2_snapshot_tree(c, k.k->p.snapshot);
- ret = bch2_snapshot_tree_lookup(trans,
- bch2_snapshot_tree(c, k.k->p.snapshot), &s_t);
+ int ret = bch2_snapshot_tree_lookup(trans, tree, &s_t);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
- "%s: snapshot tree %u not found", __func__,
- snapshot_t(c, k.k->p.snapshot)->tree);
+ "%s: snapshot tree %u not found", __func__, tree);
if (ret)
return ret;
@@ -612,10 +535,10 @@ int bch2_fs_quota_read(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
__bch2_quota_set(c, k, NULL)) ?:
for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
bch2_fs_quota_read_inode(trans, &iter, k)));
bch_err_fn(c, ret);
return ret;
@@ -902,7 +825,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans,
int ret;
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ BTREE_ITER_slots|BTREE_ITER_intent);
ret = bkey_err(k);
if (unlikely(ret))
return ret;
diff --git a/fs/bcachefs/quota.h b/fs/bcachefs/quota.h
index 884f601f41c4..02d37a332218 100644
--- a/fs/bcachefs/quota.h
+++ b/fs/bcachefs/quota.h
@@ -5,11 +5,11 @@
#include "inode.h"
#include "quota_types.h"
-enum bkey_invalid_flags;
+enum bch_validate_flags;
extern const struct bch_sb_field_ops bch_sb_field_ops_quota;
int bch2_quota_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_quota ((struct bkey_ops) { \
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index 56336f3dd1d0..cf81e5128c3a 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -42,7 +42,7 @@ static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum)
bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
@@ -89,7 +89,7 @@ static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum,
bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
@@ -140,7 +140,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
bch2_trans_iter_init(trans, extent_iter,
work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
work_pos,
- BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_all_snapshots);
k = bch2_btree_iter_peek_slot(extent_iter);
if (bkey_err(k))
return k;
@@ -323,12 +323,14 @@ static int do_rebalance(struct moving_context *ctxt)
struct bkey_s_c k;
int ret = 0;
+ bch2_trans_begin(trans);
+
bch2_move_stats_init(&r->work_stats, "rebalance_work");
bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
bch2_trans_iter_init(trans, &rebalance_work_iter,
BTREE_ID_rebalance_work, POS_MIN,
- BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_all_snapshots);
while (!bch2_move_ratelimit(ctxt)) {
if (!r->enabled) {
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 03f9d6afe467..1266916ac03f 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -1,35 +1,31 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
-#include "backpointers.h"
-#include "bkey_buf.h"
#include "alloc_background.h"
-#include "btree_gc.h"
+#include "bkey_buf.h"
#include "btree_journal_iter.h"
+#include "btree_node_scan.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
-#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
-#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
-#include "lru.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
+#include "recovery_passes.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-downgrade.h"
#include "snapshot.h"
-#include "subvolume.h"
#include "super-io.h"
#include <linux/sort.h>
@@ -37,22 +33,22 @@
#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
-static bool btree_id_is_alloc(enum btree_id id)
+void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
{
- switch (id) {
- case BTREE_ID_alloc:
- case BTREE_ID_backpointers:
- case BTREE_ID_need_discard:
- case BTREE_ID_freespace:
- case BTREE_ID_bucket_gens:
- return true;
- default:
- return false;
+ u64 b = BIT_ULL(btree);
+
+ if (!(c->sb.btrees_lost_data & b)) {
+ bch_err(c, "flagging btree %s lost data", bch2_btree_id_str(btree));
+
+ mutex_lock(&c->sb_lock);
+ bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b);
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
}
}
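
The flag is sticky and persistent: it is set in the superblock's ext section under sb_lock, and the unlocked pre-check against the cached c->sb.btrees_lost_data mask keeps repeat callers from re-taking the lock and rewriting the superblock every time. read_btree_roots() below calls it for any root that fails validation or fails to read; any other read path that loses a node could do the same (hypothetical caller sketch):

	if (node_read_failed)	/* hypothetical condition */
		bch2_btree_lost_data(c, BTREE_ID_extents);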
/* for -o reconstruct_alloc: */
-static void do_reconstruct_alloc(struct bch_fs *c)
+static void bch2_reconstruct_alloc(struct bch_fs *c)
{
bch2_journal_log_msg(c, "dropping alloc info");
bch_info(c, "dropping and reconstructing all alloc info");
@@ -69,9 +65,20 @@ static void do_reconstruct_alloc(struct bch_fs *c)
__set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_alloc_key, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_stale_dirty_ptr, ext->errors_silent);
+
+ __set_bit_le64(BCH_FSCK_ERR_dev_usage_buckets_wrong, ext->errors_silent);
+ __set_bit_le64(BCH_FSCK_ERR_dev_usage_sectors_wrong, ext->errors_silent);
+ __set_bit_le64(BCH_FSCK_ERR_dev_usage_fragmented_wrong, ext->errors_silent);
+
+ __set_bit_le64(BCH_FSCK_ERR_fs_usage_btree_wrong, ext->errors_silent);
+ __set_bit_le64(BCH_FSCK_ERR_fs_usage_cached_wrong, ext->errors_silent);
+ __set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent);
+ __set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent);
+
__set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
+ __set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_need_discard_key_wrong, ext->errors_silent);
@@ -87,15 +94,17 @@ static void do_reconstruct_alloc(struct bch_fs *c)
c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
- struct journal_keys *keys = &c->journal_keys;
- size_t src, dst;
-
- move_gap(keys, keys->nr);
- for (src = 0, dst = 0; src < keys->nr; src++)
- if (!btree_id_is_alloc(keys->data[src].btree_id))
- keys->data[dst++] = keys->data[src];
- keys->nr = keys->gap = dst;
+ bch2_shoot_down_journal_keys(c, BTREE_ID_alloc,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ bch2_shoot_down_journal_keys(c, BTREE_ID_backpointers,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ bch2_shoot_down_journal_keys(c, BTREE_ID_need_discard,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ bch2_shoot_down_journal_keys(c, BTREE_ID_freespace,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
+ bch2_shoot_down_journal_keys(c, BTREE_ID_bucket_gens,
+ 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
}
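
Instead of compacting the journal-keys array by hand with a btree_id_is_alloc() filter, reconstruction now issues one targeted shoot-down per alloc btree, covering every level and the whole key range. bch2_shoot_down_journal_keys() isn't shown here (presumably it lives in btree_journal_iter.c, per the diffstat), but the loop deleted above is a fair model of what a whole-btree shoot-down must do — a sketch, not the real implementation:

	static void shoot_down_btree_sketch(struct journal_keys *keys, enum btree_id btree)
	{
		size_t dst = 0;

		move_gap(keys, keys->nr);	/* close the gap buffer first */
		for (size_t src = 0; src < keys->nr; src++)
			if (keys->data[src].btree_id != btree)
				keys->data[dst++] = keys->data[src];
		keys->nr = keys->gap = dst;
	}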
/*
@@ -127,9 +136,9 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
{
struct btree_iter iter;
unsigned iter_flags =
- BTREE_ITER_INTENT|
- BTREE_ITER_NOT_EXTENTS;
- unsigned update_flags = BTREE_TRIGGER_NORUN;
+ BTREE_ITER_intent|
+ BTREE_ITER_not_extents;
+ unsigned update_flags = BTREE_TRIGGER_norun;
int ret;
if (k->overwritten)
@@ -138,17 +147,17 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
trans->journal_res.seq = k->journal_seq;
/*
- * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
+ * BTREE_UPDATE_key_cache_reclaim disables key cache lookup/update to
* keep the key cache coherent with the underlying btree. Nothing
* besides the allocator is doing updates yet so we don't need key cache
* coherency for non-alloc btrees, and key cache fills for snapshots
- * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
+ * btrees use BTREE_ITER_filter_snapshots, which isn't available until
* the snapshots recovery pass runs.
*/
if (!k->level && k->btree_id == BTREE_ID_alloc)
- iter_flags |= BTREE_ITER_CACHED;
+ iter_flags |= BTREE_ITER_cached;
else
- update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;
+ update_flags |= BTREE_UPDATE_key_cache_reclaim;
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
@@ -186,14 +195,15 @@ static int journal_sort_seq_cmp(const void *_l, const void *_r)
return cmp_int(l->journal_seq, r->journal_seq);
}
-static int bch2_journal_replay(struct bch_fs *c)
+int bch2_journal_replay(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;
DARRAY(struct journal_key *) keys_sorted = { 0 };
struct journal *j = &c->journal;
u64 start_seq = c->journal_replay_seq_start;
u64 end_seq = c->journal_replay_seq_start;
- struct btree_trans *trans = bch2_trans_get(c);
+ struct btree_trans *trans = NULL;
+ bool immediate_flush = false;
int ret = 0;
if (keys->nr) {
@@ -206,6 +216,7 @@ static int bch2_journal_replay(struct bch_fs *c)
BUG_ON(!atomic_read(&keys->ref));
move_gap(keys, keys->nr);
+ trans = bch2_trans_get(c);
/*
* First, attempt to replay keys in sorted order. This is more
@@ -215,6 +226,13 @@ static int bch2_journal_replay(struct bch_fs *c)
darray_for_each(*keys, k) {
cond_resched();
+ /*
+ * k->allocated means the key wasn't read in from the journal,
+ * rather it was from early repair code
+ */
+ if (k->allocated)
+ immediate_flush = true;
+
/* Skip fastpath if we're low on space in the journal */
ret = c->journal.watermark ? -1 :
commit_do(trans, NULL, NULL,
@@ -243,7 +261,10 @@ static int bch2_journal_replay(struct bch_fs *c)
struct journal_key *k = *kp;
- replay_now_at(j, k->journal_seq);
+ if (k->journal_seq)
+ replay_now_at(j, k->journal_seq);
+ else
+ replay_now_at(j, j->replay_journal_seq_end);
ret = commit_do(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc|
@@ -266,7 +287,8 @@ static int bch2_journal_replay(struct bch_fs *c)
bch2_trans_put(trans);
trans = NULL;
- if (!c->opts.keep_journal)
+ if (!c->opts.retain_recovery_info &&
+ c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay)
bch2_journal_keys_put_initial(c);
replay_now_at(j, j->replay_journal_seq_end);
@@ -274,6 +296,12 @@ static int bch2_journal_replay(struct bch_fs *c)
bch2_journal_set_replay_done(j);
+ /* if we did any repair, flush it immediately */
+ if (immediate_flush) {
+ bch2_journal_flush_all_pins(&c->journal);
+ ret = bch2_journal_meta(&c->journal);
+ }
+
if (keys->nr)
bch2_journal_log_msg(c, "journal replay finished");
err:
@@ -345,14 +373,17 @@ static int journal_replay_entry_early(struct bch_fs *c,
case BCH_JSET_ENTRY_dev_usage: {
struct jset_entry_dev_usage *u =
container_of(entry, struct jset_entry_dev_usage, entry);
- struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
- unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
-
- for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
- ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
- ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
- ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
- }
+ unsigned nr_types = jset_entry_dev_usage_nr_types(u);
+
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, le32_to_cpu(u->dev));
+ if (ca)
+ for (unsigned i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
+ ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
+ ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
+ ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
+ }
+ rcu_read_unlock();
break;
}
@@ -423,10 +454,9 @@ static int journal_replay_early(struct bch_fs *c,
static int read_btree_roots(struct bch_fs *c)
{
- unsigned i;
int ret = 0;
- for (i = 0; i < btree_id_nr_alive(c); i++) {
+ for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
if (!r->alive)
@@ -435,186 +465,46 @@ static int read_btree_roots(struct bch_fs *c)
if (btree_id_is_alloc(i) && c->opts.reconstruct_alloc)
continue;
- if (r->error) {
- __fsck_err(c,
- btree_id_is_alloc(i)
- ? FSCK_CAN_IGNORE : 0,
- btree_root_bkey_invalid,
- "invalid btree root %s",
- bch2_btree_id_str(i));
- if (i == BTREE_ID_alloc)
+ if (mustfix_fsck_err_on((ret = r->error),
+ c, btree_root_bkey_invalid,
+ "invalid btree root %s",
+ bch2_btree_id_str(i)) ||
+ mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)),
+ c, btree_root_read_error,
+ "error reading btree root %s l=%u: %s",
+ bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
+ if (btree_id_is_alloc(i)) {
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
- }
+ r->error = 0;
+ } else if (!(c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
+ bch_info(c, "will run btree node scan");
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
+ c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
+ }
- ret = bch2_btree_root_read(c, i, &r->key, r->level);
- if (ret) {
- fsck_err(c,
- btree_root_read_error,
- "error reading btree root %s",
- bch2_btree_id_str(i));
- if (btree_id_is_alloc(i))
- c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
ret = 0;
+ bch2_btree_lost_data(c, i);
}
}
- for (i = 0; i < BTREE_ID_NR; i++) {
+ for (unsigned i = 0; i < BTREE_ID_NR; i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
- if (!r->b) {
+ if (!r->b && !r->error) {
r->alive = false;
r->level = 0;
- bch2_btree_root_alloc(c, i);
+ bch2_btree_root_alloc_fake(c, i, 0);
}
}
fsck_err:
return ret;
}
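
Note that btree_id_is_alloc() is still called above even though its definition was just removed from this file — it moved to a shared header elsewhere in the series (an assumption from the diffstat; this patch doesn't show the destination). For reference, the predicate gating the alloc special case:

	static bool btree_id_is_alloc(enum btree_id id)
	{
		switch (id) {
		case BTREE_ID_alloc:
		case BTREE_ID_backpointers:
		case BTREE_ID_need_discard:
		case BTREE_ID_freespace:
		case BTREE_ID_bucket_gens:
			return true;
		default:
			return false;
		}
	}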
-static int bch2_initialize_subvolumes(struct bch_fs *c)
-{
- struct bkey_i_snapshot_tree root_tree;
- struct bkey_i_snapshot root_snapshot;
- struct bkey_i_subvolume root_volume;
- int ret;
-
- bkey_snapshot_tree_init(&root_tree.k_i);
- root_tree.k.p.offset = 1;
- root_tree.v.master_subvol = cpu_to_le32(1);
- root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);
-
- bkey_snapshot_init(&root_snapshot.k_i);
- root_snapshot.k.p.offset = U32_MAX;
- root_snapshot.v.flags = 0;
- root_snapshot.v.parent = 0;
- root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
- root_snapshot.v.tree = cpu_to_le32(1);
- SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
-
- bkey_subvolume_init(&root_volume.k_i);
- root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
- root_volume.v.flags = 0;
- root_volume.v.snapshot = cpu_to_le32(U32_MAX);
- root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
-
- ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
- bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
- bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
- bch_err_fn(c, ret);
- return ret;
-}
-
-static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
-{
- struct btree_iter iter;
- struct bkey_s_c k;
- struct bch_inode_unpacked inode;
- int ret;
-
- k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
- SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
- ret = bkey_err(k);
- if (ret)
- return ret;
-
- if (!bkey_is_inode(k.k)) {
- bch_err(trans->c, "root inode not found");
- ret = -BCH_ERR_ENOENT_inode;
- goto err;
- }
-
- ret = bch2_inode_unpack(k, &inode);
- BUG_ON(ret);
-
- inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
-
- ret = bch2_inode_write(trans, &iter, &inode);
-err:
- bch2_trans_iter_exit(trans, &iter);
- return ret;
-}
-
-/* set bi_subvol on root inode */
-noinline_for_stack
-static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
-{
- int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
- __bch2_fs_upgrade_for_subvolumes(trans));
- bch_err_fn(c, ret);
- return ret;
-}
-
-const char * const bch2_recovery_passes[] = {
-#define x(_fn, ...) #_fn,
- BCH_RECOVERY_PASSES()
-#undef x
- NULL
-};
-
-static int bch2_check_allocations(struct bch_fs *c)
-{
- return bch2_gc(c, true, c->opts.norecovery);
-}
-
-static int bch2_set_may_go_rw(struct bch_fs *c)
-{
- struct journal_keys *keys = &c->journal_keys;
-
- /*
- * After we go RW, the journal keys buffer can't be modified (except for
- * setting journal_key->overwritten: it will be accessed by multiple
- * threads
- */
- move_gap(keys, keys->nr);
-
- set_bit(BCH_FS_may_go_rw, &c->flags);
-
- if (keys->nr || c->opts.fsck || !c->sb.clean)
- return bch2_fs_read_write_early(c);
- return 0;
-}
-
-struct recovery_pass_fn {
- int (*fn)(struct bch_fs *);
- unsigned when;
-};
-
-static struct recovery_pass_fn recovery_pass_fns[] = {
-#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
- BCH_RECOVERY_PASSES()
-#undef x
-};
-
-u64 bch2_recovery_passes_to_stable(u64 v)
-{
- static const u8 map[] = {
-#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
- BCH_RECOVERY_PASSES()
-#undef x
- };
-
- u64 ret = 0;
- for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
- if (v & BIT_ULL(i))
- ret |= BIT_ULL(map[i]);
- return ret;
-}
-
-u64 bch2_recovery_passes_from_stable(u64 v)
-{
- static const u8 map[] = {
-#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
- BCH_RECOVERY_PASSES()
-#undef x
- };
-
- u64 ret = 0;
- for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
- if (v & BIT_ULL(i))
- ret |= BIT_ULL(map[i]);
- return ret;
-}
-
static bool check_version_upgrade(struct bch_fs *c)
{
unsigned latest_version = bcachefs_metadata_version_current;
@@ -687,96 +577,6 @@ static bool check_version_upgrade(struct bch_fs *c)
return false;
}
-u64 bch2_fsck_recovery_passes(void)
-{
- u64 ret = 0;
-
- for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
- if (recovery_pass_fns[i].when & PASS_FSCK)
- ret |= BIT_ULL(i);
- return ret;
-}
-
-static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
-{
- struct recovery_pass_fn *p = recovery_pass_fns + pass;
-
- if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
- return false;
- if (c->recovery_passes_explicit & BIT_ULL(pass))
- return true;
- if ((p->when & PASS_FSCK) && c->opts.fsck)
- return true;
- if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
- return true;
- if (p->when & PASS_ALWAYS)
- return true;
- return false;
-}
-
-static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
-{
- struct recovery_pass_fn *p = recovery_pass_fns + pass;
- int ret;
-
- if (!(p->when & PASS_SILENT))
- bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
- bch2_recovery_passes[pass]);
- ret = p->fn(c);
- if (ret)
- return ret;
- if (!(p->when & PASS_SILENT))
- bch2_print(c, KERN_CONT " done\n");
-
- return 0;
-}
-
-static int bch2_run_recovery_passes(struct bch_fs *c)
-{
- int ret = 0;
-
- while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
- if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
- unsigned pass = c->curr_recovery_pass;
-
- ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
- if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
- (ret && c->curr_recovery_pass < pass))
- continue;
- if (ret)
- break;
-
- c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
- }
- c->curr_recovery_pass++;
- c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);
- }
-
- return ret;
-}
-
-int bch2_run_online_recovery_passes(struct bch_fs *c)
-{
- int ret = 0;
-
- for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
- struct recovery_pass_fn *p = recovery_pass_fns + i;
-
- if (!(p->when & PASS_ONLINE))
- continue;
-
- ret = bch2_run_recovery_pass(c, i);
- if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
- i = c->curr_recovery_pass;
- continue;
- }
- if (ret)
- break;
- }
-
- return ret;
-}
-
int bch2_fs_recovery(struct bch_fs *c)
{
struct bch_sb_field_clean *clean = NULL;
@@ -809,69 +609,57 @@ int bch2_fs_recovery(struct bch_fs *c)
goto err;
}
- if (c->opts.fsck && c->opts.norecovery) {
- bch_err(c, "cannot select both norecovery and fsck");
- ret = -EINVAL;
- goto err;
- }
+ if (c->opts.norecovery)
+ c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1;
- if (!c->opts.nochanges) {
- mutex_lock(&c->sb_lock);
- bool write_sb = false;
-
- struct bch_sb_field_ext *ext =
- bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
- if (!ext) {
- ret = -BCH_ERR_ENOSPC_sb;
- mutex_unlock(&c->sb_lock);
- goto err;
- }
-
- if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
- ext->recovery_passes_required[0] |=
- cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
- write_sb = true;
- }
+ mutex_lock(&c->sb_lock);
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+ bool write_sb = false;
- u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
- if (sb_passes) {
- struct printbuf buf = PRINTBUF;
- prt_str(&buf, "superblock requires following recovery passes to be run:\n ");
- prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
- bch_info(c, "%s", buf.buf);
- printbuf_exit(&buf);
- }
+ if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
+ ext->recovery_passes_required[0] |=
+ cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
+ write_sb = true;
+ }
- if (bch2_check_version_downgrade(c)) {
- struct printbuf buf = PRINTBUF;
+ u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+ if (sb_passes) {
+ struct printbuf buf = PRINTBUF;
+ prt_str(&buf, "superblock requires following recovery passes to be run:\n ");
+ prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
+ bch_info(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ }
- prt_str(&buf, "Version downgrade required:");
+ if (bch2_check_version_downgrade(c)) {
+ struct printbuf buf = PRINTBUF;
- __le64 passes = ext->recovery_passes_required[0];
- bch2_sb_set_downgrade(c,
- BCH_VERSION_MINOR(bcachefs_metadata_version_current),
- BCH_VERSION_MINOR(c->sb.version));
- passes = ext->recovery_passes_required[0] & ~passes;
- if (passes) {
- prt_str(&buf, "\n running recovery passes: ");
- prt_bitflags(&buf, bch2_recovery_passes,
- bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
- }
+ prt_str(&buf, "Version downgrade required:");
- bch_info(c, "%s", buf.buf);
- printbuf_exit(&buf);
- write_sb = true;
+ __le64 passes = ext->recovery_passes_required[0];
+ bch2_sb_set_downgrade(c,
+ BCH_VERSION_MINOR(bcachefs_metadata_version_current),
+ BCH_VERSION_MINOR(c->sb.version));
+ passes = ext->recovery_passes_required[0] & ~passes;
+ if (passes) {
+ prt_str(&buf, "\n running recovery passes: ");
+ prt_bitflags(&buf, bch2_recovery_passes,
+ bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
}
- if (check_version_upgrade(c))
- write_sb = true;
+ bch_info(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ write_sb = true;
+ }
- if (write_sb)
- bch2_write_super(c);
+ if (check_version_upgrade(c))
+ write_sb = true;
- c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
- mutex_unlock(&c->sb_lock);
- }
+ if (write_sb)
+ bch2_write_super(c);
+
+ c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
+ mutex_unlock(&c->sb_lock);
if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
@@ -885,7 +673,9 @@ int bch2_fs_recovery(struct bch_fs *c)
goto err;
}
- if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
+ bch2_journal_pos_from_member_info_resume(c);
+
+ if (!c->sb.clean || c->opts.retain_recovery_info) {
struct genradix_iter iter;
struct journal_replay **i;
@@ -965,7 +755,7 @@ use_clean:
c->journal_replay_seq_end = blacklist_seq - 1;
if (c->opts.reconstruct_alloc)
- do_reconstruct_alloc(c);
+ bch2_reconstruct_alloc(c);
zero_out_btree_mem_ptr(&c->journal_keys);
@@ -1017,6 +807,12 @@ use_clean:
clear_bit(BCH_FS_fsck_running, &c->flags);
+ /* fsync if we fixed errors */
+ if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
+ bch2_journal_flush_all_pins(&c->journal);
+ bch2_journal_meta(&c->journal);
+ }
+
/* If we fixed errors, verify that fs is actually clean now: */
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
test_bit(BCH_FS_errors_fixed, &c->flags) &&
@@ -1051,7 +847,8 @@ use_clean:
}
mutex_lock(&c->sb_lock);
- bool write_sb = false;
+ ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+ write_sb = false;
if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
@@ -1064,15 +861,18 @@ use_clean:
write_sb = true;
}
- if (!test_bit(BCH_FS_error, &c->flags)) {
- struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
- if (ext &&
- (!bch2_is_zero(ext->recovery_passes_required, sizeof(ext->recovery_passes_required)) ||
- !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent)))) {
- memset(ext->recovery_passes_required, 0, sizeof(ext->recovery_passes_required));
- memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
- write_sb = true;
- }
+ if (!test_bit(BCH_FS_error, &c->flags) &&
+ !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent))) {
+ memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
+ write_sb = true;
+ }
+
+ if (c->opts.fsck &&
+ !test_bit(BCH_FS_error, &c->flags) &&
+ c->recovery_pass_done == BCH_RECOVERY_PASS_NR - 1 &&
+ ext->btrees_lost_data) {
+ ext->btrees_lost_data = 0;
+ write_sb = true;
}
if (c->opts.fsck &&
@@ -1083,6 +883,9 @@ use_clean:
write_sb = true;
}
+ if (bch2_blacklist_entries_gc(c))
+ write_sb = true;
+
if (write_sb)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
@@ -1105,18 +908,16 @@ use_clean:
bch_info(c, "scanning for old btree nodes done");
}
- if (c->journal_seq_blacklist_table &&
- c->journal_seq_blacklist_table->nr > 128)
- queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
-
ret = 0;
out:
bch2_flush_fsck_errs(c);
- if (!c->opts.keep_journal &&
- test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
+ if (!c->opts.retain_recovery_info) {
bch2_journal_keys_put_initial(c);
- kfree(clean);
+ bch2_find_btree_nodes_exit(&c->found_btree_nodes);
+ }
+ if (!IS_ERR(clean))
+ kfree(clean);
if (!ret &&
test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
@@ -1141,6 +942,7 @@ int bch2_fs_initialize(struct bch_fs *c)
int ret;
bch_notice(c, "initializing new filesystem");
+ set_bit(BCH_FS_new_fs, &c->flags);
mutex_lock(&c->sb_lock);
c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
@@ -1155,11 +957,11 @@ int bch2_fs_initialize(struct bch_fs *c)
}
mutex_unlock(&c->sb_lock);
- c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
+ c->curr_recovery_pass = BCH_RECOVERY_PASS_NR;
set_bit(BCH_FS_may_go_rw, &c->flags);
for (unsigned i = 0; i < BTREE_ID_NR; i++)
- bch2_btree_root_alloc(c, i);
+ bch2_btree_root_alloc_fake(c, i, 0);
for_each_member_device(c, ca)
bch2_dev_usage_init(ca);
@@ -1230,7 +1032,7 @@ int bch2_fs_initialize(struct bch_fs *c)
if (ret)
goto err;
- c->recovery_pass_done = ARRAY_SIZE(recovery_pass_fns) - 1;
+ c->recovery_pass_done = BCH_RECOVERY_PASS_NR - 1;
if (enabled_qtypes(c)) {
ret = bch2_fs_quota_read(c);
diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h
index 4e9d24719b2e..4bf818de1f2f 100644
--- a/fs/bcachefs/recovery.h
+++ b/fs/bcachefs/recovery.h
@@ -2,37 +2,9 @@
#ifndef _BCACHEFS_RECOVERY_H
#define _BCACHEFS_RECOVERY_H
-extern const char * const bch2_recovery_passes[];
+void bch2_btree_lost_data(struct bch_fs *, enum btree_id);
-u64 bch2_recovery_passes_to_stable(u64 v);
-u64 bch2_recovery_passes_from_stable(u64 v);
-
-/*
- * For when we need to rewind recovery passes and run a pass we skipped:
- */
-static inline int bch2_run_explicit_recovery_pass(struct bch_fs *c,
- enum bch_recovery_pass pass)
-{
- if (c->recovery_passes_explicit & BIT_ULL(pass))
- return 0;
-
- bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
- bch2_recovery_passes[pass], pass,
- bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
-
- c->recovery_passes_explicit |= BIT_ULL(pass);
-
- if (c->curr_recovery_pass >= pass) {
- c->curr_recovery_pass = pass;
- c->recovery_passes_complete &= (1ULL << pass) >> 1;
- return -BCH_ERR_restart_recovery;
- } else {
- return 0;
- }
-}
-
-int bch2_run_online_recovery_passes(struct bch_fs *);
-u64 bch2_fsck_recovery_passes(void);
+int bch2_journal_replay(struct bch_fs *);
int bch2_fs_recovery(struct bch_fs *);
int bch2_fs_initialize(struct bch_fs *);
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
new file mode 100644
index 000000000000..4a9eb9582b6e
--- /dev/null
+++ b/fs/bcachefs/recovery_passes.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "alloc_background.h"
+#include "backpointers.h"
+#include "btree_gc.h"
+#include "btree_node_scan.h"
+#include "ec.h"
+#include "fsck.h"
+#include "inode.h"
+#include "journal.h"
+#include "lru.h"
+#include "logged_ops.h"
+#include "rebalance.h"
+#include "recovery.h"
+#include "recovery_passes.h"
+#include "snapshot.h"
+#include "subvolume.h"
+#include "super.h"
+#include "super-io.h"
+
+const char * const bch2_recovery_passes[] = {
+#define x(_fn, ...) #_fn,
+ BCH_RECOVERY_PASSES()
+#undef x
+ NULL
+};
+
+static int bch2_set_may_go_rw(struct bch_fs *c)
+{
+ struct journal_keys *keys = &c->journal_keys;
+
+ /*
+ * After we go RW, the journal keys buffer can't be modified (except for
+ * setting journal_key->overwritten): it will be accessed by multiple
+ * threads
+ */
+ move_gap(keys, keys->nr);
+
+ set_bit(BCH_FS_may_go_rw, &c->flags);
+
+ if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
+ return bch2_fs_read_write_early(c);
+ return 0;
+}
+
+struct recovery_pass_fn {
+ int (*fn)(struct bch_fs *);
+ unsigned when;
+};
+
+static struct recovery_pass_fn recovery_pass_fns[] = {
+#define x(_fn, _id, _when) { .fn = bch2_##_fn, .when = _when },
+ BCH_RECOVERY_PASSES()
+#undef x
+};
+
+static const u8 passes_to_stable_map[] = {
+#define x(n, id, ...) [BCH_RECOVERY_PASS_##n] = BCH_RECOVERY_PASS_STABLE_##n,
+ BCH_RECOVERY_PASSES()
+#undef x
+};
+
+static enum bch_recovery_pass_stable bch2_recovery_pass_to_stable(enum bch_recovery_pass pass)
+{
+ return passes_to_stable_map[pass];
+}
+
+u64 bch2_recovery_passes_to_stable(u64 v)
+{
+ u64 ret = 0;
+ for (unsigned i = 0; i < ARRAY_SIZE(passes_to_stable_map); i++)
+ if (v & BIT_ULL(i))
+ ret |= BIT_ULL(passes_to_stable_map[i]);
+ return ret;
+}
+
+u64 bch2_recovery_passes_from_stable(u64 v)
+{
+ static const u8 map[] = {
+#define x(n, id, ...) [BCH_RECOVERY_PASS_STABLE_##n] = BCH_RECOVERY_PASS_##n,
+ BCH_RECOVERY_PASSES()
+#undef x
+ };
+
+ u64 ret = 0;
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++)
+ if (v & BIT_ULL(i))
+ ret |= BIT_ULL(map[i]);
+ return ret;
+}
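+
Both directions are pure bitmask remaps through x-macro-generated tables. A worked example using values from recovery_passes_types.h below: scan_for_btree_nodes is first in run order (enum value 0) but carries stable id 37, so:

	u64 run_order = BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);	/* bit 0 */
	u64 stable    = bch2_recovery_passes_to_stable(run_order);		/* bit 37 */

	/* the round trip recovers the original mask: */
	BUG_ON(bch2_recovery_passes_from_stable(stable) != run_order);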
+
+/*
+ * For when we need to rewind recovery passes and run a pass we skipped:
+ */
+int bch2_run_explicit_recovery_pass(struct bch_fs *c,
+ enum bch_recovery_pass pass)
+{
+ if (c->recovery_passes_explicit & BIT_ULL(pass))
+ return 0;
+
+ bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
+ bch2_recovery_passes[pass], pass,
+ bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);
+
+ c->recovery_passes_explicit |= BIT_ULL(pass);
+
+ if (c->curr_recovery_pass >= pass) {
+ c->curr_recovery_pass = pass;
+ c->recovery_passes_complete &= (1ULL << pass) >> 1;
+ return -BCH_ERR_restart_recovery;
+ } else {
+ return 0;
+ }
+}
+
+int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c,
+ enum bch_recovery_pass pass)
+{
+ enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);
+
+ mutex_lock(&c->sb_lock);
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+
+ if (!test_bit_le64(s, ext->recovery_passes_required)) {
+ __set_bit_le64(s, ext->recovery_passes_required);
+ bch2_write_super(c);
+ }
+ mutex_unlock(&c->sb_lock);
+
+ return bch2_run_explicit_recovery_pass(c, pass);
+}
+
+static void bch2_clear_recovery_pass_required(struct bch_fs *c,
+ enum bch_recovery_pass pass)
+{
+ enum bch_recovery_pass_stable s = bch2_recovery_pass_to_stable(pass);
+
+ mutex_lock(&c->sb_lock);
+ struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
+
+ if (test_bit_le64(s, ext->recovery_passes_required)) {
+ __clear_bit_le64(s, ext->recovery_passes_required);
+ bch2_write_super(c);
+ }
+ mutex_unlock(&c->sb_lock);
+}
+
+u64 bch2_fsck_recovery_passes(void)
+{
+ u64 ret = 0;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
+ if (recovery_pass_fns[i].when & PASS_FSCK)
+ ret |= BIT_ULL(i);
+ return ret;
+}
+
+static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
+{
+ struct recovery_pass_fn *p = recovery_pass_fns + pass;
+
+ if (c->recovery_passes_explicit & BIT_ULL(pass))
+ return true;
+ if ((p->when & PASS_FSCK) && c->opts.fsck)
+ return true;
+ if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
+ return true;
+ if (p->when & PASS_ALWAYS)
+ return true;
+ return false;
+}
+
+static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
+{
+ struct recovery_pass_fn *p = recovery_pass_fns + pass;
+ int ret;
+
+ if (!(p->when & PASS_SILENT))
+ bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
+ bch2_recovery_passes[pass]);
+ ret = p->fn(c);
+ if (ret)
+ return ret;
+ if (!(p->when & PASS_SILENT))
+ bch2_print(c, KERN_CONT " done\n");
+
+ return 0;
+}
+
+int bch2_run_online_recovery_passes(struct bch_fs *c)
+{
+ int ret = 0;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
+ struct recovery_pass_fn *p = recovery_pass_fns + i;
+
+ if (!(p->when & PASS_ONLINE))
+ continue;
+
+ ret = bch2_run_recovery_pass(c, i);
+ if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
+ i = c->curr_recovery_pass;
+ continue;
+ }
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int bch2_run_recovery_passes(struct bch_fs *c)
+{
+ int ret = 0;
+
+ while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
+ if (c->opts.recovery_pass_last &&
+ c->curr_recovery_pass > c->opts.recovery_pass_last)
+ break;
+
+ if (should_run_recovery_pass(c, c->curr_recovery_pass)) {
+ unsigned pass = c->curr_recovery_pass;
+
+ ret = bch2_run_recovery_pass(c, c->curr_recovery_pass) ?:
+ bch2_journal_flush(&c->journal);
+ if (bch2_err_matches(ret, BCH_ERR_restart_recovery) ||
+ (ret && c->curr_recovery_pass < pass))
+ continue;
+ if (ret)
+ break;
+
+ c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass);
+ }
+
+ c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass);
+
+ if (!test_bit(BCH_FS_error, &c->flags))
+ bch2_clear_recovery_pass_required(c, c->curr_recovery_pass);
+
+ c->curr_recovery_pass++;
+ }
+
+ return ret;
+}
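
The driver loop tolerates rewinds: a pass that discovers damage belonging to an earlier pass calls bch2_run_explicit_recovery_pass(), which moves curr_recovery_pass backwards and returns -BCH_ERR_restart_recovery, and the matches() check above then continues the while loop from the earlier pass rather than treating it as failure. A hypothetical pass body using that protocol:

	static int check_something(struct bch_fs *c)
	{
		bool topology_damage = false;	/* hypothetical finding */

		if (topology_damage)
			/* rewinds and returns -BCH_ERR_restart_recovery */
			return bch2_run_explicit_recovery_pass(c,
					BCH_RECOVERY_PASS_check_topology);
		return 0;
	}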
diff --git a/fs/bcachefs/recovery_passes.h b/fs/bcachefs/recovery_passes.h
new file mode 100644
index 000000000000..99b464e127b8
--- /dev/null
+++ b/fs/bcachefs/recovery_passes.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_RECOVERY_PASSES_H
+#define _BCACHEFS_RECOVERY_PASSES_H
+
+extern const char * const bch2_recovery_passes[];
+
+u64 bch2_recovery_passes_to_stable(u64 v);
+u64 bch2_recovery_passes_from_stable(u64 v);
+
+u64 bch2_fsck_recovery_passes(void);
+
+int bch2_run_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass);
+int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *, enum bch_recovery_pass);
+
+int bch2_run_online_recovery_passes(struct bch_fs *);
+int bch2_run_recovery_passes(struct bch_fs *);
+
+#endif /* _BCACHEFS_RECOVERY_PASSES_H */
diff --git a/fs/bcachefs/recovery_types.h b/fs/bcachefs/recovery_passes_types.h
index 4959e95e7c74..773aea9a0080 100644
--- a/fs/bcachefs/recovery_types.h
+++ b/fs/bcachefs/recovery_passes_types.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _BCACHEFS_RECOVERY_TYPES_H
-#define _BCACHEFS_RECOVERY_TYPES_H
+#ifndef _BCACHEFS_RECOVERY_PASSES_TYPES_H
+#define _BCACHEFS_RECOVERY_PASSES_TYPES_H
#define PASS_SILENT BIT(0)
#define PASS_FSCK BIT(1)
@@ -13,6 +13,7 @@
* must never change:
*/
#define BCH_RECOVERY_PASSES() \
+ x(scan_for_btree_nodes, 37, 0) \
x(check_topology, 4, 0) \
x(alloc_read, 0, PASS_ALWAYS) \
x(stripes_read, 1, PASS_ALWAYS) \
@@ -31,13 +32,13 @@
x(check_alloc_to_lru_refs, 15, PASS_ONLINE|PASS_FSCK) \
x(fs_freespace_init, 16, PASS_ALWAYS|PASS_SILENT) \
x(bucket_gens_init, 17, 0) \
+ x(reconstruct_snapshots, 38, 0) \
x(check_snapshot_trees, 18, PASS_ONLINE|PASS_FSCK) \
x(check_snapshots, 19, PASS_ONLINE|PASS_FSCK) \
x(check_subvols, 20, PASS_ONLINE|PASS_FSCK) \
x(check_subvol_children, 35, PASS_ONLINE|PASS_FSCK) \
x(delete_dead_snapshots, 21, PASS_ONLINE|PASS_FSCK) \
x(fs_upgrade_for_subvolumes, 22, 0) \
- x(resume_logged_ops, 23, PASS_ALWAYS) \
x(check_inodes, 24, PASS_FSCK) \
x(check_extents, 25, PASS_FSCK) \
x(check_indirect_extents, 26, PASS_FSCK) \
@@ -47,6 +48,7 @@
x(check_subvolume_structure, 36, PASS_ONLINE|PASS_FSCK) \
x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \
x(check_nlinks, 31, PASS_FSCK) \
+ x(resume_logged_ops, 23, PASS_ALWAYS) \
x(delete_dead_inodes, 32, PASS_FSCK|PASS_UNCLEAN) \
x(fix_reflink_p, 33, 0) \
x(set_fs_needs_rebalance, 34, 0) \
@@ -56,6 +58,7 @@ enum bch_recovery_pass {
#define x(n, id, when) BCH_RECOVERY_PASS_##n,
BCH_RECOVERY_PASSES()
#undef x
+ BCH_RECOVERY_PASS_NR
};
/* But we also need stable identifiers that can be used in the superblock */
@@ -65,4 +68,4 @@ enum bch_recovery_pass_stable {
#undef x
};
-#endif /* _BCACHEFS_RECOVERY_TYPES_H */
+#endif /* _BCACHEFS_RECOVERY_PASSES_TYPES_H */
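
One list, two enums: BCH_RECOVERY_PASS_* values follow list order (run order) while BCH_RECOVERY_PASS_STABLE_* values are pinned to the second column, which is why resume_logged_ops can move later in the run order here while keeping on-disk id 23, and why new passes take fresh ids (37, 38) regardless of where they're inserted. The pattern in miniature:

	#define PASSES()	\
		x(alpha, 7)	\
		x(beta,  3)

	enum pass {		/* run order: PASS_alpha == 0, PASS_beta == 1 */
	#define x(n, id) PASS_##n,
		PASSES()
	#undef x
	};

	enum pass_stable {	/* on disk: PASS_STABLE_alpha == 7, PASS_STABLE_beta == 3 */
	#define x(n, id) PASS_STABLE_##n = id,
		PASSES()
	#undef x
	};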
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index c47c66c2b394..9ac6cf21cfbf 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -30,7 +30,7 @@ static inline unsigned bkey_type_to_indirect(const struct bkey *k)
/* reflink pointers */
int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
@@ -74,20 +74,20 @@ bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
}
static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
- struct bkey_s_c_reflink_p p,
- u64 *idx, unsigned flags)
+ struct bkey_s_c_reflink_p p, u64 *idx,
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_i *k;
__le64 *refcount;
- int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
+ int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
struct printbuf buf = PRINTBUF;
int ret;
k = bch2_bkey_get_mut_noupdate(trans, &iter,
BTREE_ID_reflink, POS(0, *idx),
- BTREE_ITER_WITH_UPDATES);
+ BTREE_ITER_with_updates);
ret = PTR_ERR_OR_ZERO(k);
if (ret)
goto err;
@@ -102,7 +102,7 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
goto err;
}
- if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
+ if (!*refcount && (flags & BTREE_TRIGGER_overwrite)) {
bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_trans_inconsistent(trans,
"indirect extent refcount underflow at %llu while marking\n %s",
@@ -111,7 +111,7 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
goto err;
}
- if (flags & BTREE_TRIGGER_INSERT) {
+ if (flags & BTREE_TRIGGER_insert) {
struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
u64 pad;
@@ -141,12 +141,13 @@ err:
}
static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
- struct bkey_s_c_reflink_p p,
- u64 *idx, unsigned flags, size_t r_idx)
+ struct bkey_s_c_reflink_p p, u64 *idx,
+ enum btree_iter_update_trigger_flags flags,
+ size_t r_idx)
{
struct bch_fs *c = trans->c;
struct reflink_gc *r;
- int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
+ int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
u64 start = le64_to_cpu(p.v->idx);
u64 end = le64_to_cpu(p.v->idx) + p.k->size;
u64 next_idx = end + le32_to_cpu(p.v->back_pad);
@@ -163,10 +164,13 @@ static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
BUG_ON((s64) r->refcount + add < 0);
- r->refcount += add;
+ if (flags & BTREE_TRIGGER_gc)
+ r->refcount += add;
*idx = r->offset;
return 0;
not_found:
+ BUG_ON(!(flags & BTREE_TRIGGER_check_repair));
+
if (fsck_err(c, reflink_p_to_missing_reflink_v,
"pointer to missing indirect extent\n"
" %s\n"
@@ -185,12 +189,11 @@ not_found:
} else {
bkey_error_init(update);
update->k.p = p.k->p;
- update->k.p.offset = next_idx;
- update->k.size = next_idx - *idx;
+ update->k.size = p.k->size;
set_bkey_val_u64s(&update->k, 0);
}
- ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_NORUN);
+ ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_norun);
}
*idx = next_idx;
@@ -201,8 +204,8 @@ fsck_err:
}
static int __trigger_reflink_p(struct btree_trans *trans,
- enum btree_id btree_id, unsigned level,
- struct bkey_s_c k, unsigned flags)
+ enum btree_id btree_id, unsigned level, struct bkey_s_c k,
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
@@ -211,12 +214,12 @@ static int __trigger_reflink_p(struct btree_trans *trans,
u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad);
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ if (flags & BTREE_TRIGGER_transactional) {
while (idx < end && !ret)
ret = trans_trigger_reflink_p_segment(trans, p, &idx, flags);
}
- if (flags & BTREE_TRIGGER_GC) {
+ if (flags & (BTREE_TRIGGER_check_repair|BTREE_TRIGGER_gc)) {
size_t l = 0, r = c->reflink_gc_nr;
while (l < r) {
@@ -239,10 +242,10 @@ int bch2_trigger_reflink_p(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old,
struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
- if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
- (flags & BTREE_TRIGGER_INSERT)) {
+ if ((flags & BTREE_TRIGGER_transactional) &&
+ (flags & BTREE_TRIGGER_insert)) {
struct bch_reflink_p *v = bkey_s_to_reflink_p(new).v;
v->front_pad = v->back_pad = 0;
@@ -254,7 +257,7 @@ int bch2_trigger_reflink_p(struct btree_trans *trans,
/* indirect extents */
int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
return bch2_bkey_ptrs_invalid(c, k, flags, err);
@@ -282,23 +285,25 @@ bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
}
#endif
-static inline void check_indirect_extent_deleting(struct bkey_s new, unsigned *flags)
+static inline void
+check_indirect_extent_deleting(struct bkey_s new,
+ enum btree_iter_update_trigger_flags *flags)
{
- if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
+ if ((*flags & BTREE_TRIGGER_insert) && !*bkey_refcount(new)) {
new.k->type = KEY_TYPE_deleted;
new.k->size = 0;
set_bkey_val_u64s(new.k, 0);
- *flags &= ~BTREE_TRIGGER_INSERT;
+ *flags &= ~BTREE_TRIGGER_insert;
}
}
int bch2_trigger_reflink_v(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
- if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
- (flags & BTREE_TRIGGER_INSERT))
+ if ((flags & BTREE_TRIGGER_transactional) &&
+ (flags & BTREE_TRIGGER_insert))
check_indirect_extent_deleting(new, &flags);
return bch2_trigger_extent(trans, btree_id, level, old, new, flags);
@@ -307,7 +312,7 @@ int bch2_trigger_reflink_v(struct btree_trans *trans,
/* indirect inline data */
int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
return 0;
@@ -327,7 +332,7 @@ void bch2_indirect_inline_data_to_text(struct printbuf *out,
int bch2_trigger_indirect_inline_data(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
check_indirect_extent_deleting(new, &flags);
@@ -350,7 +355,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
k = bch2_btree_iter_peek_prev(&reflink_iter);
ret = bkey_err(k);
if (ret)
@@ -395,7 +400,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
err:
bch2_trans_iter_exit(trans, &reflink_iter);
@@ -456,9 +461,9 @@ s64 bch2_remap_range(struct bch_fs *c,
goto err;
bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
while ((ret == 0 ||
bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
@@ -568,7 +573,7 @@ s64 bch2_remap_range(struct bch_fs *c,
bch2_trans_begin(trans);
ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u,
- dst_inum, BTREE_ITER_INTENT);
+ dst_inum, BTREE_ITER_intent);
if (!ret2 &&
inode_u.bi_size < new_i_size) {
diff --git a/fs/bcachefs/reflink.h b/fs/bcachefs/reflink.h
index 4d8867289717..e894f3a2c67a 100644
--- a/fs/bcachefs/reflink.h
+++ b/fs/bcachefs/reflink.h
@@ -2,15 +2,16 @@
#ifndef _BCACHEFS_REFLINK_H
#define _BCACHEFS_REFLINK_H
-enum bkey_invalid_flags;
+enum bch_validate_flags;
int bch2_reflink_p_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_reflink_p ((struct bkey_ops) { \
.key_invalid = bch2_reflink_p_invalid, \
@@ -21,11 +22,12 @@ int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned,
})
int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
.key_invalid = bch2_reflink_v_invalid, \
@@ -36,13 +38,13 @@ int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
})
int bch2_indirect_inline_data_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_indirect_inline_data_to_text(struct printbuf *,
struct bch_fs *, struct bkey_s_c);
int bch2_trigger_indirect_inline_data(struct btree_trans *,
enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s,
- unsigned);
+ enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) { \
.key_invalid = bch2_indirect_inline_data_invalid, \
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index cc2672c12031..bd1d5d085e23 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -6,12 +6,15 @@
#include "replicas.h"
#include "super-io.h"
+#include <linux/sort.h>
+
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
struct bch_replicas_cpu *);
/* Some (buggy!) compilers don't allow memcmp to be passed as a pointer */
-static int bch2_memcmp(const void *l, const void *r, size_t size)
+static int bch2_memcmp(const void *l, const void *r, const void *priv)
{
+ size_t size = (size_t) priv;
return memcmp(l, r, size);
}
@@ -39,7 +42,8 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
{
- eytzinger0_sort(r->entries, r->nr, r->entry_size, bch2_memcmp, NULL);
+ eytzinger0_sort_r(r->entries, r->nr, r->entry_size,
+ bch2_memcmp, NULL, (void *)(size_t)r->entry_size);
}
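The old eytzinger0_sort()/sort_cmp_size() interface passed the element size straight to the comparator; the kernel's sort_r() threads an opaque priv argument through instead, so the entry size is now smuggled in via the priv pointer, as above. A minimal sketch of the pattern (entries, nr_entries and entry_size are hypothetical placeholders):

static int memcmp_r(const void *l, const void *r, const void *priv)
{
	/* the element size arrives as the opaque priv argument */
	return memcmp(l, r, (size_t) priv);
}

/* usage: pass the size as priv; NULL selects the default swap */
sort_r(entries, nr_entries, entry_size, memcmp_r, NULL,
       (void *)(size_t)entry_size);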
static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
@@ -80,7 +84,7 @@ int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
}
for (unsigned i = 0; i < r->nr_devs; i++)
- if (!bch2_dev_exists(sb, r->devs[i])) {
+ if (!bch2_member_exists(sb, r->devs[i])) {
prt_printf(err, "invalid device %u in entry ", r->devs[i]);
goto bad;
}
@@ -196,7 +200,7 @@ cpu_replicas_add_entry(struct bch_fs *c,
};
for (i = 0; i < new_entry->nr_devs; i++)
- BUG_ON(!bch2_dev_exists2(c, new_entry->devs[i]));
+ BUG_ON(!bch2_dev_exists(c, new_entry->devs[i]));
BUG_ON(!new_entry->data_type);
verify_replicas_entry(new_entry);
@@ -228,7 +232,7 @@ static inline int __replicas_entry_idx(struct bch_replicas_cpu *r,
verify_replicas_entry(search);
-#define entry_cmp(_l, _r, size) memcmp(_l, _r, entry_size)
+#define entry_cmp(_l, _r) memcmp(_l, _r, entry_size)
idx = eytzinger0_find(r->entries, r->nr, r->entry_size,
entry_cmp, search);
#undef entry_cmp
@@ -824,10 +828,11 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
{
unsigned i;
- sort_cmp_size(cpu_r->entries,
- cpu_r->nr,
- cpu_r->entry_size,
- bch2_memcmp, NULL);
+ sort_r(cpu_r->entries,
+ cpu_r->nr,
+ cpu_r->entry_size,
+ bch2_memcmp, NULL,
+ (void *)(size_t)cpu_r->entry_size);
for (i = 0; i < cpu_r->nr; i++) {
struct bch_replicas_entry_v1 *e =
@@ -855,7 +860,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
}
static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
- struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
struct bch_replicas_cpu cpu_r;
@@ -894,7 +899,7 @@ const struct bch_sb_field_ops bch_sb_field_ops_replicas = {
};
static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *f,
- struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_replicas_v0 *sb_r = field_to_type(f, replicas_v0);
struct bch_replicas_cpu cpu_r;
@@ -942,18 +947,20 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
percpu_down_read(&c->mark_lock);
for_each_cpu_replicas_entry(&c->replicas, e) {
- unsigned i, nr_online = 0, nr_failed = 0, dflags = 0;
+ unsigned nr_online = 0, nr_failed = 0, dflags = 0;
bool metadata = e->data_type < BCH_DATA_user;
if (e->data_type == BCH_DATA_cached)
continue;
- for (i = 0; i < e->nr_devs; i++) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);
-
+ rcu_read_lock();
+ for (unsigned i = 0; i < e->nr_devs; i++) {
nr_online += test_bit(e->devs[i], devs.d);
- nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
+
+ struct bch_dev *ca = bch2_dev_rcu(c, e->devs[i]);
+ nr_failed += ca && ca->mi.state == BCH_MEMBER_STATE_failed;
}
+ rcu_read_unlock();
if (nr_failed == e->nr_devs)
continue;
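bch_dev_bkey_exists() assumed a referenced device must exist; replicas entries can outlive device removal, so the lookup is now done under RCU with bch2_dev_rcu(), which may return NULL. The caller shape, roughly (dev_idx is a hypothetical index):

rcu_read_lock();
struct bch_dev *ca = bch2_dev_rcu(c, dev_idx);
bool failed = ca && ca->mi.state == BCH_MEMBER_STATE_failed;
rcu_read_unlock();
/* ca must not be dereferenced past rcu_read_unlock() */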
diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c
index 5980ba2563fe..47f10ab57f40 100644
--- a/fs/bcachefs/sb-clean.c
+++ b/fs/bcachefs/sb-clean.c
@@ -29,6 +29,14 @@ int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *cle
for (entry = clean->start;
entry < (struct jset_entry *) vstruct_end(&clean->field);
entry = vstruct_next(entry)) {
+ if (vstruct_end(entry) > vstruct_end(&clean->field)) {
+ bch_err(c, "journal entry (u64s %u) overran end of superblock clean section (u64s %u) by %zu",
+ le16_to_cpu(entry->u64s), le32_to_cpu(clean->field.u64s),
+ (u64 *) vstruct_end(entry) - (u64 *) vstruct_end(&clean->field));
+ bch2_sb_error_count(c, BCH_FSCK_ERR_sb_clean_entry_overrun);
+ return -BCH_ERR_fsck_repair_unimplemented;
+ }
+
ret = bch2_journal_entry_validate(c, NULL, entry,
le16_to_cpu(c->disk_sb.sb->version),
BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
@@ -258,9 +266,8 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
}
}
-static int bch2_sb_clean_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_clean_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_clean *clean = field_to_type(f, clean);
@@ -270,6 +277,17 @@ static int bch2_sb_clean_validate(struct bch_sb *sb,
return -BCH_ERR_invalid_sb_clean;
}
+ for (struct jset_entry *entry = clean->start;
+ entry != vstruct_end(&clean->field);
+ entry = vstruct_next(entry)) {
+ if ((void *) vstruct_next(entry) > vstruct_end(&clean->field)) {
+ prt_str(err, "entry type ");
+ bch2_prt_jset_entry_type(err, entry->type);
+ prt_str(err, " overruns end of section");
+ return -BCH_ERR_invalid_sb_clean;
+ }
+ }
+
return 0;
}
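Both new loops guard against a corrupt entry->u64s walking vstruct_next() past the end of the section. The defensive shape, as a sketch (start/end stand in for clean->start and vstruct_end(&clean->field)):

struct jset_entry *entry;
void *end = vstruct_end(&clean->field);

for (entry = clean->start;
     (void *) entry < end;
     entry = vstruct_next(entry)) {
	/* check the length before trusting it */
	if ((void *) vstruct_next(entry) > end)
		return -BCH_ERR_invalid_sb_clean;	/* entry overruns the section */

	/* entry is fully contained: its contents may now be read */
}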
@@ -279,14 +297,15 @@ static void bch2_sb_clean_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field_clean *clean = field_to_type(f, clean);
struct jset_entry *entry;
- prt_printf(out, "flags: %x", le32_to_cpu(clean->flags));
- prt_newline(out);
- prt_printf(out, "journal_seq: %llu", le64_to_cpu(clean->journal_seq));
- prt_newline(out);
+ prt_printf(out, "flags: %x\n", le32_to_cpu(clean->flags));
+ prt_printf(out, "journal_seq: %llu\n", le64_to_cpu(clean->journal_seq));
for (entry = clean->start;
entry != vstruct_end(&clean->field);
entry = vstruct_next(entry)) {
+ if ((void *) vstruct_next(entry) > vstruct_end(&clean->field))
+ break;
+
if (entry->type == BCH_JSET_ENTRY_btree_keys &&
!entry->u64s)
continue;
@@ -370,6 +389,8 @@ void bch2_fs_mark_clean(struct bch_fs *c)
goto out;
}
+ bch2_journal_pos_from_member_info_set(c);
+
bch2_write_super(c);
out:
mutex_unlock(&c->sb_lock);
diff --git a/fs/bcachefs/sb-counters.c b/fs/bcachefs/sb-counters.c
index 7dc898761bb3..6992e7469112 100644
--- a/fs/bcachefs/sb-counters.c
+++ b/fs/bcachefs/sb-counters.c
@@ -20,9 +20,8 @@ static size_t bch2_sb_counter_nr_entries(struct bch_sb_field_counters *ctrs)
return (__le64 *) vstruct_end(&ctrs->field) - &ctrs->d[0];
};
-static int bch2_sb_counters_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_counters_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
return 0;
};
@@ -31,19 +30,12 @@ static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
- unsigned int i;
unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
- for (i = 0; i < nr; i++) {
- if (i < BCH_COUNTER_NR)
- prt_printf(out, "%s ", bch2_counter_names[i]);
- else
- prt_printf(out, "(unknown)");
-
- prt_tab(out);
- prt_printf(out, "%llu", le64_to_cpu(ctrs->d[i]));
- prt_newline(out);
- }
+ for (unsigned i = 0; i < nr; i++)
+ prt_printf(out, "%s \t%llu\n",
+ i < BCH_COUNTER_NR ? bch2_counter_names[i] : "(unknown)",
+ le64_to_cpu(ctrs->d[i]));
};
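The rewritten to_text functions rely on printbufs interpreting '\t' as advance-to-next-tabstop and '\n' as newline, so each prt_printf/prt_tab/prt_newline triple collapses into one formatted call. Sketched (24 is an arbitrary tabstop, val a hypothetical counter):

struct printbuf out = PRINTBUF;
u64 val = 42;

printbuf_tabstop_push(&out, 24);		/* column for the values */
prt_printf(&out, "counter:\t%llu\n", val);	/* tab-aligned, one call */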
int bch2_sb_counters_to_cpu(struct bch_fs *c)
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index e4396cb0bacb..390a1bbd2567 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -7,7 +7,7 @@
#include "bcachefs.h"
#include "darray.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "sb-downgrade.h"
#include "sb-errors.h"
#include "super-io.h"
@@ -51,7 +51,10 @@
BCH_FSCK_ERR_subvol_fs_path_parent_wrong) \
x(btree_subvolume_children, \
BIT_ULL(BCH_RECOVERY_PASS_check_subvols), \
- BCH_FSCK_ERR_subvol_children_not_set)
+ BCH_FSCK_ERR_subvol_children_not_set) \
+ x(mi_btree_bitmap, \
+ BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
+ BCH_FSCK_ERR_btree_bitmap_not_marked)
#define DOWNGRADE_TABLE()
@@ -131,15 +134,25 @@ downgrade_entry_next_c(const struct bch_sb_field_downgrade_entry *e)
#define for_each_downgrade_entry(_d, _i) \
for (const struct bch_sb_field_downgrade_entry *_i = (_d)->entries; \
(void *) _i < vstruct_end(&(_d)->field) && \
- (void *) &_i->errors[0] < vstruct_end(&(_d)->field); \
+ (void *) &_i->errors[0] <= vstruct_end(&(_d)->field) && \
+ (void *) downgrade_entry_next_c(_i) <= vstruct_end(&(_d)->field); \
_i = downgrade_entry_next_c(_i))
static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
- struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_downgrade *e = field_to_type(f, downgrade);
- for_each_downgrade_entry(e, i) {
+ for (const struct bch_sb_field_downgrade_entry *i = e->entries;
+ (void *) i < vstruct_end(&e->field);
+ i = downgrade_entry_next_c(i)) {
+ if (flags & BCH_VALIDATE_write &&
+ ((void *) &i->errors[0] > vstruct_end(&e->field) ||
+ (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field))) {
+ prt_printf(err, "downgrade entry overruns end of superblock section)");
+ return -BCH_ERR_invalid_sb_downgrade;
+ }
+
if (BCH_VERSION_MAJOR(le16_to_cpu(i->version)) !=
BCH_VERSION_MAJOR(le16_to_cpu(sb->version))) {
prt_printf(err, "downgrade entry with mismatched major version (%u != %u)",
@@ -161,19 +174,16 @@ static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb,
printbuf_tabstop_push(out, 16);
for_each_downgrade_entry(e, i) {
- prt_str(out, "version:");
- prt_tab(out);
+ prt_str(out, "version:\t");
bch2_version_to_text(out, le16_to_cpu(i->version));
prt_newline(out);
- prt_str(out, "recovery passes:");
- prt_tab(out);
+ prt_str(out, "recovery passes:\t");
prt_bitflags(out, bch2_recovery_passes,
bch2_recovery_passes_from_stable(le64_to_cpu(i->recovery_passes[0])));
prt_newline(out);
- prt_str(out, "errors:");
- prt_tab(out);
+ prt_str(out, "errors:\t");
bool first = true;
for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
if (!first)
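Two layers of defense here: for_each_downgrade_entry() now clips iteration so a truncated entry is never read past the section, while the open-coded validate loop rejects such entries outright, but only when BCH_VALIDATE_write is set, so an existing superblock with a short final entry still mounts while a newly written one must be well formed. Schematically (i and e as in the loop above; entry_overruns() is a hypothetical shorthand for the two bounds tests):

if ((flags & BCH_VALIDATE_write) &&
    entry_overruns(i, vstruct_end(&e->field)))
	return -BCH_ERR_invalid_sb_downgrade;	/* strict on write only */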
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
index 5f5bcae391fb..bda33e59e226 100644
--- a/fs/bcachefs/sb-errors.c
+++ b/fs/bcachefs/sb-errors.c
@@ -30,7 +30,7 @@ static inline unsigned bch2_sb_field_errors_u64s(unsigned nr)
}
static int bch2_sb_errors_validate(struct bch_sb *sb, struct bch_sb_field *f,
- struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_errors *e = field_to_type(f, errors);
unsigned i, nr = bch2_sb_field_errors_nr_entries(e);
diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h
index 5178bf579f7c..87324747351a 100644
--- a/fs/bcachefs/sb-errors_types.h
+++ b/fs/bcachefs/sb-errors_types.h
@@ -130,7 +130,7 @@
x(bucket_gens_nonzero_for_invalid_buckets, 122) \
x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \
x(need_discard_freespace_key_bad, 124) \
- x(backpointer_pos_wrong, 125) \
+ x(backpointer_bucket_offset_wrong, 125) \
x(backpointer_to_missing_device, 126) \
x(backpointer_to_missing_alloc, 127) \
x(backpointer_to_missing_ptr, 128) \
@@ -265,7 +265,15 @@
x(subvol_children_bad, 257) \
x(subvol_loop, 258) \
x(subvol_unreachable, 259) \
- x(btree_node_bkey_bad_u64s, 260)
+ x(btree_node_bkey_bad_u64s, 260) \
+ x(btree_node_topology_empty_interior_node, 261) \
+ x(btree_ptr_v2_min_key_bad, 262) \
+ x(btree_root_unreadable_and_scan_found_nothing, 263) \
+ x(snapshot_node_missing, 264) \
+ x(dup_backpointer_to_bad_csum_extent, 265) \
+ x(btree_bitmap_not_marked, 266) \
+ x(sb_clean_entry_overrun, 267) \
+ x(btree_ptr_v2_written_0, 268)
enum bch_sb_error_id {
#define x(t, n) BCH_FSCK_ERR_##t = n,
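The error table is an x-macro list, so adding an entry such as sb_clean_entry_overrun above automatically gives it both a stable enum value and a printable name. A self-contained sketch of the technique (MY_ERRS and its entries are hypothetical):

#define MY_ERRS()	\
	x(first_err, 0)	\
	x(second_err, 1)

enum my_err {
#define x(t, n) MY_ERR_##t = n,
	MY_ERRS()
#undef x
};

static const char * const my_err_strs[] = {
#define x(t, n) [n] = #t,
	MY_ERRS()
#undef x
};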
diff --git a/fs/bcachefs/sb-members.c b/fs/bcachefs/sb-members.c
index eff5ce18c69c..39196f2a4197 100644
--- a/fs/bcachefs/sb-members.c
+++ b/fs/bcachefs/sb-members.c
@@ -1,12 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "btree_cache.h"
#include "disk_groups.h"
+#include "error.h"
#include "opts.h"
#include "replicas.h"
#include "sb-members.h"
#include "super-io.h"
+void bch2_dev_missing(struct bch_fs *c, unsigned dev)
+{
+ bch2_fs_inconsistent(c, "pointer to nonexistent device %u", dev);
+}
+
+void bch2_dev_bucket_missing(struct bch_fs *c, struct bpos bucket)
+{
+ bch2_fs_inconsistent(c, "pointer to nonexistent bucket %llu:%llu", bucket.inode, bucket.offset);
+}
+
#define x(t, n, ...) [n] = #t,
static const char * const bch2_iops_measurements[] = {
BCH_IOPS_MEASUREMENTS()
@@ -123,9 +135,9 @@ static int validate_member(struct printbuf *err,
struct bch_sb *sb,
int i)
{
- if (le64_to_cpu(m.nbuckets) > LONG_MAX) {
- prt_printf(err, "device %u: too many buckets (got %llu, max %lu)",
- i, le64_to_cpu(m.nbuckets), LONG_MAX);
+ if (le64_to_cpu(m.nbuckets) > BCH_MEMBER_NBUCKETS_MAX) {
+ prt_printf(err, "device %u: too many buckets (got %llu, max %u)",
+ i, le64_to_cpu(m.nbuckets), BCH_MEMBER_NBUCKETS_MAX);
return -BCH_ERR_invalid_sb_members;
}
@@ -163,18 +175,14 @@ static void member_to_text(struct printbuf *out,
u64 bucket_size = le16_to_cpu(m.bucket_size);
u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;
- if (!bch2_member_exists(&m))
+ if (!bch2_member_alive(&m))
return;
- prt_printf(out, "Device:");
- prt_tab(out);
- prt_printf(out, "%u", i);
- prt_newline(out);
+ prt_printf(out, "Device:\t%u\n", i);
printbuf_indent_add(out, 2);
- prt_printf(out, "Label:");
- prt_tab(out);
+ prt_printf(out, "Label:\t");
if (BCH_MEMBER_GROUP(&m)) {
unsigned idx = BCH_MEMBER_GROUP(&m) - 1;
@@ -188,103 +196,73 @@ static void member_to_text(struct printbuf *out,
}
prt_newline(out);
- prt_printf(out, "UUID:");
- prt_tab(out);
+ prt_printf(out, "UUID:\t");
pr_uuid(out, m.uuid.b);
prt_newline(out);
- prt_printf(out, "Size:");
- prt_tab(out);
+ prt_printf(out, "Size:\t");
prt_units_u64(out, device_size << 9);
prt_newline(out);
- for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
- prt_printf(out, "%s errors:", bch2_member_error_strs[i]);
- prt_tab(out);
- prt_u64(out, le64_to_cpu(m.errors[i]));
- prt_newline(out);
- }
+ for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
+ prt_printf(out, "%s errors:\t%llu\n", bch2_member_error_strs[i], le64_to_cpu(m.errors[i]));
- for (unsigned i = 0; i < BCH_IOPS_NR; i++) {
- prt_printf(out, "%s iops:", bch2_iops_measurements[i]);
- prt_tab(out);
- prt_printf(out, "%u", le32_to_cpu(m.iops[i]));
- prt_newline(out);
- }
+ for (unsigned i = 0; i < BCH_IOPS_NR; i++)
+ prt_printf(out, "%s iops:\t%u\n", bch2_iops_measurements[i], le32_to_cpu(m.iops[i]));
- prt_printf(out, "Bucket size:");
- prt_tab(out);
+ prt_printf(out, "Bucket size:\t");
prt_units_u64(out, bucket_size << 9);
prt_newline(out);
- prt_printf(out, "First bucket:");
- prt_tab(out);
- prt_printf(out, "%u", le16_to_cpu(m.first_bucket));
- prt_newline(out);
-
- prt_printf(out, "Buckets:");
- prt_tab(out);
- prt_printf(out, "%llu", le64_to_cpu(m.nbuckets));
- prt_newline(out);
+ prt_printf(out, "First bucket:\t%u\n", le16_to_cpu(m.first_bucket));
+ prt_printf(out, "Buckets:\t%llu\n", le64_to_cpu(m.nbuckets));
- prt_printf(out, "Last mount:");
- prt_tab(out);
+ prt_printf(out, "Last mount:\t");
if (m.last_mount)
bch2_prt_datetime(out, le64_to_cpu(m.last_mount));
else
prt_printf(out, "(never)");
prt_newline(out);
- prt_printf(out, "Last superblock write:");
- prt_tab(out);
- prt_u64(out, le64_to_cpu(m.seq));
- prt_newline(out);
+ prt_printf(out, "Last superblock write:\t%llu\n", le64_to_cpu(m.seq));
- prt_printf(out, "State:");
- prt_tab(out);
- prt_printf(out, "%s",
+ prt_printf(out, "State:\t%s\n",
BCH_MEMBER_STATE(&m) < BCH_MEMBER_STATE_NR
? bch2_member_states[BCH_MEMBER_STATE(&m)]
: "unknown");
- prt_newline(out);
- prt_printf(out, "Data allowed:");
- prt_tab(out);
+ prt_printf(out, "Data allowed:\t");
if (BCH_MEMBER_DATA_ALLOWED(&m))
prt_bitflags(out, __bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
else
prt_printf(out, "(none)");
prt_newline(out);
- prt_printf(out, "Has data:");
- prt_tab(out);
+ prt_printf(out, "Has data:\t");
if (data_have)
prt_bitflags(out, __bch2_data_types, data_have);
else
prt_printf(out, "(none)");
prt_newline(out);
- prt_str(out, "Durability:");
- prt_tab(out);
- prt_printf(out, "%llu", BCH_MEMBER_DURABILITY(&m) ? BCH_MEMBER_DURABILITY(&m) - 1 : 1);
+ prt_printf(out, "Btree allocated bitmap blocksize:\t");
+ prt_units_u64(out, 1ULL << m.btree_bitmap_shift);
prt_newline(out);
- prt_printf(out, "Discard:");
- prt_tab(out);
- prt_printf(out, "%llu", BCH_MEMBER_DISCARD(&m));
+ prt_printf(out, "Btree allocated bitmap:\t");
+ bch2_prt_u64_base2_nbits(out, le64_to_cpu(m.btree_allocated_bitmap), 64);
prt_newline(out);
- prt_printf(out, "Freespace initialized:");
- prt_tab(out);
- prt_printf(out, "%llu", BCH_MEMBER_FREESPACE_INITIALIZED(&m));
- prt_newline(out);
+ prt_printf(out, "Durability:\t%llu\n", BCH_MEMBER_DURABILITY(&m) ? BCH_MEMBER_DURABILITY(&m) - 1 : 1);
+
+ prt_printf(out, "Discard:\t%llu\n", BCH_MEMBER_DISCARD(&m));
+ prt_printf(out, "Freespace initialized:\t%llu\n", BCH_MEMBER_FREESPACE_INITIALIZED(&m));
printbuf_indent_sub(out, 2);
}
-static int bch2_sb_members_v1_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_members_v1_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
unsigned i;
@@ -332,9 +310,8 @@ static void bch2_sb_members_v2_to_text(struct printbuf *out, struct bch_sb *sb,
member_to_text(out, members_v2_get(mi, i), gi, sb, i);
}
-static int bch2_sb_members_v2_validate(struct bch_sb *sb,
- struct bch_sb_field *f,
- struct printbuf *err)
+static int bch2_sb_members_v2_validate(struct bch_sb *sb, struct bch_sb_field *f,
+ enum bch_validate_flags flags, struct printbuf *err)
{
struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
size_t mi_bytes = (void *) __bch2_members_v2_get_mut(mi, sb->nr_devices) -
@@ -389,12 +366,8 @@ void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
prt_newline(out);
printbuf_indent_add(out, 2);
- for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
- prt_printf(out, "%s:", bch2_member_error_strs[i]);
- prt_tab(out);
- prt_u64(out, atomic64_read(&ca->errors[i]));
- prt_newline(out);
- }
+ for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
+ prt_printf(out, "%s:\t%llu\n", bch2_member_error_strs[i], atomic64_read(&ca->errors[i]));
printbuf_indent_sub(out, 2);
prt_str(out, "IO errors since ");
@@ -403,12 +376,9 @@ void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
prt_newline(out);
printbuf_indent_add(out, 2);
- for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
- prt_printf(out, "%s:", bch2_member_error_strs[i]);
- prt_tab(out);
- prt_u64(out, atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
- prt_newline(out);
- }
+ for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
+ prt_printf(out, "%s:\t%llu\n", bch2_member_error_strs[i],
+ atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
printbuf_indent_sub(out, 2);
}
@@ -426,3 +396,71 @@ void bch2_dev_errors_reset(struct bch_dev *ca)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
+
+/*
+ * Per-member "range has btree nodes" bitmap:
+ *
+ * This is so that if we ever have to run the btree node scan for repair, we
+ * don't have to scan whole devices.
+ */
+
+bool bch2_dev_btree_bitmap_marked(struct bch_fs *c, struct bkey_s_c k)
+{
+ bool ret = true;
+ rcu_read_lock();
+ bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
+ struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
+ if (!ca)
+ continue;
+
+ if (!bch2_dev_btree_bitmap_marked_sectors(ca, ptr->offset, btree_sectors(c))) {
+ ret = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, unsigned dev,
+ u64 start, unsigned sectors)
+{
+ struct bch_member *m = __bch2_members_v2_get_mut(mi, dev);
+ u64 bitmap = le64_to_cpu(m->btree_allocated_bitmap);
+
+ u64 end = start + sectors;
+
+ int resize = ilog2(roundup_pow_of_two(end)) - (m->btree_bitmap_shift + 6);
+ if (resize > 0) {
+ u64 new_bitmap = 0;
+
+ for (unsigned i = 0; i < 64; i++)
+ if (bitmap & BIT_ULL(i))
+ new_bitmap |= BIT_ULL(i >> resize);
+ bitmap = new_bitmap;
+ m->btree_bitmap_shift += resize;
+ }
+
+ BUG_ON(m->btree_bitmap_shift > 57);
+ BUG_ON(end > 64ULL << m->btree_bitmap_shift);
+
+ for (unsigned bit = start >> m->btree_bitmap_shift;
+ (u64) bit << m->btree_bitmap_shift < end;
+ bit++)
+ bitmap |= BIT_ULL(bit);
+
+ m->btree_allocated_bitmap = cpu_to_le64(bitmap);
+}
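Each member gets a 64-bit coarse bitmap in which bit i covers sectors [i << btree_bitmap_shift, (i + 1) << btree_bitmap_shift); when a new btree node lands beyond the bitmap's current reach, the shift grows and existing bits are folded down (bit i becomes bit i >> resize), a strictly conservative over-approximation. A sketch of the bit-selection step (kernel-style types assumed; the caller keeps end <= 64ULL << shift, as the BUG_ONs above do):

/* which bits cover the sector range [start, start + sectors)? */
static u64 btree_bitmap_bits_for(u64 start, unsigned sectors, unsigned shift)
{
	u64 bits = 0, end = start + sectors;

	for (unsigned bit = start >> shift;
	     (u64) bit << shift < end;
	     bit++)
		bits |= BIT_ULL(bit);
	return bits;
}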
+
+void bch2_dev_btree_bitmap_mark(struct bch_fs *c, struct bkey_s_c k)
+{
+ lockdep_assert_held(&c->sb_lock);
+
+ struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
+ bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
+ if (!bch2_member_exists(c->disk_sb.sb, ptr->dev))
+ continue;
+
+ __bch2_dev_btree_bitmap_mark(mi, ptr->dev, ptr->offset, btree_sectors(c));
+ }
+}
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index be0a94183271..dd93192ec065 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -3,6 +3,7 @@
#define _BCACHEFS_SB_MEMBERS_H
#include "darray.h"
+#include "bkey_types.h"
extern char * const bch2_member_error_strs[];
@@ -28,19 +29,6 @@ static inline bool bch2_dev_is_readable(struct bch_dev *ca)
ca->mi.state != BCH_MEMBER_STATE_failed;
}
-static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
-{
- if (!percpu_ref_tryget(&ca->io_ref))
- return false;
-
- if (ca->mi.state == BCH_MEMBER_STATE_rw ||
- (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
- return true;
-
- percpu_ref_put(&ca->io_ref);
- return false;
-}
-
static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
@@ -104,14 +92,41 @@ static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *
for (struct bch_dev *_ca = NULL; \
(_ca = __bch2_next_dev((_c), _ca, (_mask)));)
-static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
+static inline void bch2_dev_get(struct bch_dev *ca)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ BUG_ON(atomic_long_inc_return(&ca->ref) <= 1L);
+#else
+ percpu_ref_get(&ca->ref);
+#endif
+}
+
+static inline void __bch2_dev_put(struct bch_dev *ca)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ long r = atomic_long_dec_return(&ca->ref);
+ if (r < (long) !ca->dying)
+ panic("bch_dev->ref underflow, last put: %pS\n", (void *) ca->last_put);
+ ca->last_put = _THIS_IP_;
+ if (!r)
+ complete(&ca->ref_completion);
+#else
+ percpu_ref_put(&ca->ref);
+#endif
+}
+
+static inline void bch2_dev_put(struct bch_dev *ca)
{
if (ca)
- percpu_ref_put(&ca->ref);
+ __bch2_dev_put(ca);
+}
+static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
+{
rcu_read_lock();
+ bch2_dev_put(ca);
if ((ca = __bch2_next_dev(c, ca, NULL)))
- percpu_ref_get(&ca->ref);
+ bch2_dev_get(ca);
rcu_read_unlock();
return ca;
@@ -131,10 +146,10 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
struct bch_dev *ca,
unsigned state_mask)
{
+ rcu_read_lock();
if (ca)
percpu_ref_put(&ca->io_ref);
- rcu_read_lock();
while ((ca = __bch2_next_dev(c, ca, NULL)) &&
(!((1 << ca->mi.state) & state_mask) ||
!percpu_ref_tryget(&ca->io_ref)))
@@ -157,26 +172,113 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
#define for_each_readable_member(c, ca) \
__for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
-/*
- * If a key exists that references a device, the device won't be going away and
- * we can omit rcu_read_lock():
- */
-static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
+static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
- EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
+ return dev < c->sb.nr_devices && c->devs[dev];
+}
- return rcu_dereference_check(c->devs[idx], 1);
+static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
+{
+ return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
}
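bucket_valid() is the classic single-compare range check: with unsigned arithmetic, b - first_bucket wraps to a huge value whenever b < first_bucket, so one branch rejects both out-of-range ends; nbuckets_minus_first is precomputed in bch2_mi_to_cpu() below. The equivalent two-branch form, for reference (a sketch, not part of the patch):

static inline bool bucket_valid_slow(const struct bch_dev *ca, u64 b)
{
	return b >= ca->mi.first_bucket && b < ca->mi.nbuckets;
}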
-static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
+static inline struct bch_dev *bch2_dev_have_ref(const struct bch_fs *c, unsigned dev)
{
- EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
+ EBUG_ON(!bch2_dev_exists(c, dev));
- return rcu_dereference_protected(c->devs[idx],
+ return rcu_dereference_check(c->devs[dev], 1);
+}
+
+static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
+{
+ EBUG_ON(!bch2_dev_exists(c, dev));
+
+ return rcu_dereference_protected(c->devs[dev],
lockdep_is_held(&c->sb_lock) ||
lockdep_is_held(&c->state_lock));
}
+static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
+{
+ return c && dev < c->sb.nr_devices
+ ? rcu_dereference(c->devs[dev])
+ : NULL;
+}
+
+static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
+{
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, dev);
+ if (ca)
+ bch2_dev_get(ca);
+ rcu_read_unlock();
+ return ca;
+}
+
+void bch2_dev_missing(struct bch_fs *, unsigned);
+
+static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
+{
+ struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
+ if (!ca)
+ bch2_dev_missing(c, dev);
+ return ca;
+}
+
+static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
+{
+ struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
+ if (ca && !bucket_valid(ca, bucket.offset)) {
+ bch2_dev_put(ca);
+ ca = NULL;
+ }
+ return ca;
+}
+
+void bch2_dev_bucket_missing(struct bch_fs *, struct bpos);
+
+static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
+{
+ struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket);
+ if (!ca)
+ bch2_dev_bucket_missing(c, bucket);
+ return ca;
+}
+
+static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
+{
+ if (ca && ca->dev_idx == dev_idx)
+ return ca;
+ bch2_dev_put(ca);
+ return bch2_dev_tryget_noerror(c, dev_idx);
+}
+
+static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
+{
+ if (ca && ca->dev_idx == dev_idx)
+ return ca;
+ bch2_dev_put(ca);
+ return bch2_dev_tryget(c, dev_idx);
+}
+
+static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
+{
+ rcu_read_lock();
+ struct bch_dev *ca = bch2_dev_rcu(c, dev);
+ if (ca && !percpu_ref_tryget(&ca->io_ref))
+ ca = NULL;
+ rcu_read_unlock();
+
+ if (ca &&
+ (ca->mi.state == BCH_MEMBER_STATE_rw ||
+ (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
+ return ca;
+
+ if (ca)
+ percpu_ref_put(&ca->io_ref);
+ return NULL;
+}
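Taken together, these helpers replace bch_dev_bkey_exists(): a device index can now legitimately fail to resolve, and callers hold a real reference across use. A typical caller, sketched (dev_idx is a hypothetical index):

struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev_idx);
if (!ca)
	return 0;	/* device went away; caller decides what that means */

/* ... safe to use ca here ... */

bch2_dev_put(ca);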
+
/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
@@ -191,16 +293,16 @@ static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;
-static inline bool bch2_member_exists(struct bch_member *m)
+static inline bool bch2_member_alive(struct bch_member *m)
{
return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}
-static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
+static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
{
if (dev < sb->nr_devices) {
struct bch_member m = bch2_sb_member_get(sb, dev);
- return bch2_member_exists(&m);
+ return bch2_member_alive(&m);
}
return false;
}
@@ -209,6 +311,8 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
return (struct bch_member_cpu) {
.nbuckets = le64_to_cpu(mi->nbuckets),
+ .nbuckets_minus_first = le64_to_cpu(mi->nbuckets) -
+ le16_to_cpu(mi->first_bucket),
.first_bucket = le16_to_cpu(mi->first_bucket),
.bucket_size = le16_to_cpu(mi->bucket_size),
.group = BCH_MEMBER_GROUP(mi),
@@ -219,7 +323,9 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
? BCH_MEMBER_DURABILITY(mi) - 1
: 1,
.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
- .valid = bch2_member_exists(mi),
+ .valid = bch2_member_alive(mi),
+ .btree_bitmap_shift = mi->btree_bitmap_shift,
+ .btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
};
}
@@ -228,4 +334,22 @@ void bch2_sb_members_from_cpu(struct bch_fs *);
void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);
+static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
+{
+ u64 end = start + sectors;
+
+ if (end > 64ULL << ca->mi.btree_bitmap_shift)
+ return false;
+
+ for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
+ (u64) bit << ca->mi.btree_bitmap_shift < end;
+ bit++)
+ if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
+ return false;
+ return true;
+}
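A worked example with assumed values: with btree_bitmap_shift = 16, one bit covers 1 << 16 sectors = 32 MiB, and the whole 64-bit bitmap spans 64 << 16 sectors = 2 GiB of device; a node starting at sector 100000 maps to bit 100000 >> 16 = 1, so that bit must be set for the check above to pass.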
+
+bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
+void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);
+
#endif /* _BCACHEFS_SB_MEMBERS_H */
diff --git a/fs/bcachefs/sb-members_types.h b/fs/bcachefs/sb-members_types.h
new file mode 100644
index 000000000000..c0eda888fe39
--- /dev/null
+++ b/fs/bcachefs/sb-members_types.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_MEMBERS_TYPES_H
+#define _BCACHEFS_SB_MEMBERS_TYPES_H
+
+struct bch_member_cpu {
+ u64 nbuckets; /* device size */
+ u64 nbuckets_minus_first;
+ u16 first_bucket; /* index of first bucket used */
+ u16 bucket_size; /* sectors */
+ u16 group;
+ u8 state;
+ u8 discard;
+ u8 data_allowed;
+ u8 durability;
+ u8 freespace_initialized;
+ u8 valid;
+ u8 btree_bitmap_shift;
+ u64 btree_allocated_bitmap;
+};
+
+#endif /* _BCACHEFS_SB_MEMBERS_TYPES_H */
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 39debe814bf3..629900a5e641 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -8,6 +8,7 @@
#include "errcode.h"
#include "error.h"
#include "fs.h"
+#include "recovery_passes.h"
#include "snapshot.h"
#include <linux/random.h>
@@ -31,7 +32,7 @@ void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
}
int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
int ret = 0;
@@ -48,7 +49,7 @@ int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
struct bch_snapshot_tree *s)
{
int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
- BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
+ BTREE_ITER_with_updates, snapshot_tree, s);
if (bch2_err_matches(ret, ENOENT))
ret = -BCH_ERR_ENOENT_snapshot_tree;
@@ -93,8 +94,10 @@ static int bch2_snapshot_tree_create(struct btree_trans *trans,
static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id, u32 ancestor)
{
- while (id && id < ancestor)
- id = __snapshot_t(t, id)->parent;
+ while (id && id < ancestor) {
+ const struct snapshot_t *s = __snapshot_t(t, id);
+ id = s ? s->parent : 0;
+ }
return id == ancestor;
}
@@ -110,6 +113,8 @@ static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancest
static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
{
const struct snapshot_t *s = __snapshot_t(t, id);
+ if (!s)
+ return 0;
if (s->skip[2] <= ancestor)
return s->skip[2];
@@ -120,6 +125,15 @@ static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ances
return s->parent;
}
+static bool test_ancestor_bitmap(struct snapshot_table *t, u32 id, u32 ancestor)
+{
+ const struct snapshot_t *s = __snapshot_t(t, id);
+ if (!s)
+ return false;
+
+ return test_bit(ancestor - id - 1, s->is_ancestor);
+}
+
bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
bool ret;
@@ -127,7 +141,7 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
rcu_read_lock();
struct snapshot_table *t = rcu_dereference(c->snapshots);
- if (unlikely(c->recovery_pass_done <= BCH_RECOVERY_PASS_check_snapshots)) {
+ if (unlikely(c->recovery_pass_done < BCH_RECOVERY_PASS_check_snapshots)) {
ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
goto out;
}
@@ -135,13 +149,11 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
while (id && id < ancestor - IS_ANCESTOR_BITMAP)
id = get_ancestor_below(t, id, ancestor);
- if (id && id < ancestor) {
- ret = test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor);
+ ret = id && id < ancestor
+ ? test_ancestor_bitmap(t, id, ancestor)
+ : id == ancestor;
- EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
- } else {
- ret = id == ancestor;
- }
+ EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
out:
rcu_read_unlock();
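The query has two phases: climb the skip pointers until ancestor lies within IS_ANCESTOR_BITMAP of id, then answer with a single test of the per-node is_ancestor bitmap, where bit n set means "id + n + 1 is an ancestor of this node". The new test_ancestor_bitmap() wrapper makes the bitmap probe NULL-safe for IDs missing from the table, and the EBUG_ON now cross-checks every result against the slow parent walk on debug builds, not just the bitmap path.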
@@ -151,36 +163,39 @@ out:
static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
{
size_t idx = U32_MAX - id;
- size_t new_size;
struct snapshot_table *new, *old;
- new_size = max(16UL, roundup_pow_of_two(idx + 1));
+ size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
+ size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
- new = kvzalloc(struct_size(new, s, new_size), GFP_KERNEL);
+ new = kvzalloc(new_bytes, GFP_KERNEL);
if (!new)
return NULL;
+ new->nr = new_size;
+
old = rcu_dereference_protected(c->snapshots, true);
if (old)
- memcpy(new->s,
- rcu_dereference_protected(c->snapshots, true)->s,
- sizeof(new->s[0]) * c->snapshot_table_size);
+ memcpy(new->s, old->s, sizeof(old->s[0]) * old->nr);
rcu_assign_pointer(c->snapshots, new);
- c->snapshot_table_size = new_size;
- kvfree_rcu_mightsleep(old);
+ kvfree_rcu(old, rcu);
- return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+ return &rcu_dereference_protected(c->snapshots,
+ lockdep_is_held(&c->snapshot_table_lock))->s[idx];
}
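Rather than rounding the element count to a power of two, the table is now sized to whatever the allocator will actually hand back: kmalloc_size_roundup() reports the real bucket size, the usable element count is derived from it, and the table carries its own ->nr so the separate c->snapshot_table_size field goes away. The shape of the idiom, sketched:

size_t bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));

new = kvzalloc(bytes, GFP_KERNEL);
if (!new)
	return NULL;
/* use every element the allocation can actually hold */
new->nr = (bytes - sizeof(*new)) / sizeof(new->s[0]);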
static inline struct snapshot_t *snapshot_t_mut(struct bch_fs *c, u32 id)
{
size_t idx = U32_MAX - id;
+ struct snapshot_table *table =
+ rcu_dereference_protected(c->snapshots,
+ lockdep_is_held(&c->snapshot_table_lock));
lockdep_assert_held(&c->snapshot_table_lock);
- if (likely(idx < c->snapshot_table_size))
- return &rcu_dereference_protected(c->snapshots, true)->s[idx];
+ if (likely(table && idx < table->nr))
+ return &table->s[idx];
return __snapshot_t_mut(c, id);
}
@@ -208,7 +223,7 @@ void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
}
int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_snapshot s;
@@ -283,7 +298,7 @@ static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
static int __bch2_mark_snapshot(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct snapshot_t *t;
@@ -337,7 +352,7 @@ err:
int bch2_mark_snapshot(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
return __bch2_mark_snapshot(trans, btree, level, old, new.s_c, flags);
}
@@ -346,7 +361,7 @@ int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
struct bch_snapshot *s)
{
return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
- BTREE_ITER_WITH_UPDATES, snapshot, s);
+ BTREE_ITER_with_updates, snapshot, s);
}
static int bch2_snapshot_live(struct btree_trans *trans, u32 id)
@@ -567,6 +582,13 @@ static int check_snapshot_tree(struct btree_trans *trans,
u32 subvol_id;
ret = bch2_snapshot_tree_master_subvol(trans, root_id, &subvol_id);
+ bch_err_fn(c, ret);
+
+ if (bch2_err_matches(ret, ENOENT)) { /* nothing to be done here */
+ ret = 0;
+ goto err;
+ }
+
if (ret)
goto err;
@@ -596,7 +618,7 @@ int bch2_check_snapshot_trees(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_snapshot_trees, POS_MIN,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_snapshot_tree(trans, &iter, k)));
bch_err_fn(c, ret);
@@ -673,7 +695,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans,
root = bch2_bkey_get_iter_typed(trans, &root_iter,
BTREE_ID_snapshots, POS(0, root_id),
- BTREE_ITER_WITH_UPDATES, snapshot);
+ BTREE_ITER_with_updates, snapshot);
ret = bkey_err(root);
if (ret)
goto err;
@@ -724,7 +746,6 @@ static int check_snapshot(struct btree_trans *trans,
u32 parent_id = bch2_snapshot_parent_early(c, k.k->p.offset);
u32 real_depth;
struct printbuf buf = PRINTBUF;
- bool should_have_subvol;
u32 i, id;
int ret = 0;
@@ -770,7 +791,7 @@ static int check_snapshot(struct btree_trans *trans,
}
}
- should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
+ bool should_have_subvol = BCH_SNAPSHOT_SUBVOL(&s) &&
!BCH_SNAPSHOT_DELETED(&s);
if (should_have_subvol) {
@@ -865,13 +886,162 @@ int bch2_check_snapshots(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_reverse_commit(trans, iter,
BTREE_ID_snapshots, POS_MAX,
- BTREE_ITER_PREFETCH, k,
+ BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_snapshot(trans, &iter, k)));
bch_err_fn(c, ret);
return ret;
}
+static int check_snapshot_exists(struct btree_trans *trans, u32 id)
+{
+ struct bch_fs *c = trans->c;
+
+ if (bch2_snapshot_equiv(c, id))
+ return 0;
+
+ /* 0 is an invalid tree ID */
+ u32 tree_id = 0;
+ int ret = bch2_snapshot_tree_create(trans, id, 0, &tree_id);
+ if (ret)
+ return ret;
+
+ struct bkey_i_snapshot *snapshot = bch2_trans_kmalloc(trans, sizeof(*snapshot));
+ ret = PTR_ERR_OR_ZERO(snapshot);
+ if (ret)
+ return ret;
+
+ bkey_snapshot_init(&snapshot->k_i);
+ snapshot->k.p = POS(0, id);
+ snapshot->v.tree = cpu_to_le32(tree_id);
+ snapshot->v.btime.lo = cpu_to_le64(bch2_current_time(c));
+
+ return bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0) ?:
+ bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
+ bkey_s_c_null, bkey_i_to_s(&snapshot->k_i), 0) ?:
+ bch2_snapshot_set_equiv(trans, bkey_i_to_s_c(&snapshot->k_i));
+}
+
+/* Figure out which snapshot nodes belong in the same tree: */
+struct snapshot_tree_reconstruct {
+ enum btree_id btree;
+ struct bpos cur_pos;
+ snapshot_id_list cur_ids;
+ DARRAY(snapshot_id_list) trees;
+};
+
+static void snapshot_tree_reconstruct_exit(struct snapshot_tree_reconstruct *r)
+{
+ darray_for_each(r->trees, i)
+ darray_exit(i);
+ darray_exit(&r->trees);
+ darray_exit(&r->cur_ids);
+}
+
+static inline bool same_snapshot(struct snapshot_tree_reconstruct *r, struct bpos pos)
+{
+ return r->btree == BTREE_ID_inodes
+ ? r->cur_pos.offset == pos.offset
+ : r->cur_pos.inode == pos.inode;
+}
+
+static inline bool snapshot_id_lists_have_common(snapshot_id_list *l, snapshot_id_list *r)
+{
+ darray_for_each(*l, i)
+ if (snapshot_list_has_id(r, *i))
+ return true;
+ return false;
+}
+
+static void snapshot_id_list_to_text(struct printbuf *out, snapshot_id_list *s)
+{
+ bool first = true;
+ darray_for_each(*s, i) {
+ if (!first)
+ prt_char(out, ' ');
+ first = false;
+ prt_printf(out, "%u", *i);
+ }
+}
+
+static int snapshot_tree_reconstruct_next(struct bch_fs *c, struct snapshot_tree_reconstruct *r)
+{
+ if (r->cur_ids.nr) {
+ darray_for_each(r->trees, i)
+ if (snapshot_id_lists_have_common(i, &r->cur_ids)) {
+ int ret = snapshot_list_merge(c, i, &r->cur_ids);
+ if (ret)
+ return ret;
+ goto out;
+ }
+ darray_push(&r->trees, r->cur_ids);
+ darray_init(&r->cur_ids);
+ }
+out:
+ r->cur_ids.nr = 0;
+ return 0;
+}
+
+static int get_snapshot_trees(struct bch_fs *c, struct snapshot_tree_reconstruct *r, struct bpos pos)
+{
+ if (!same_snapshot(r, pos))
+ snapshot_tree_reconstruct_next(c, r);
+ r->cur_pos = pos;
+ return snapshot_list_add_nodup(c, &r->cur_ids, pos.snapshot);
+}
+
+int bch2_reconstruct_snapshots(struct bch_fs *c)
+{
+ struct btree_trans *trans = bch2_trans_get(c);
+ struct printbuf buf = PRINTBUF;
+ struct snapshot_tree_reconstruct r = {};
+ int ret = 0;
+
+ for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
+ if (btree_type_has_snapshots(btree)) {
+ r.btree = btree;
+
+ ret = for_each_btree_key(trans, iter, btree, POS_MIN,
+ BTREE_ITER_all_snapshots|BTREE_ITER_prefetch, k, ({
+ get_snapshot_trees(c, &r, k.k->p);
+ }));
+ if (ret)
+ goto err;
+
+ snapshot_tree_reconstruct_next(c, &r);
+ }
+ }
+
+ darray_for_each(r.trees, t) {
+ printbuf_reset(&buf);
+ snapshot_id_list_to_text(&buf, t);
+
+ darray_for_each(*t, id) {
+ if (fsck_err_on(!bch2_snapshot_equiv(c, *id),
+ c, snapshot_node_missing,
+ "snapshot node %u from tree %s missing, recreate?", *id, buf.buf)) {
+ if (t->nr > 1) {
+ bch_err(c, "cannot reconstruct snapshot trees with multiple nodes");
+ ret = -BCH_ERR_fsck_repair_unimplemented;
+ goto err;
+ }
+
+ ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+ check_snapshot_exists(trans, *id));
+ if (ret)
+ goto err;
+ }
+ }
+ }
+fsck_err:
+err:
+ bch2_trans_put(trans);
+ snapshot_tree_reconstruct_exit(&r);
+ printbuf_exit(&buf);
+ bch_err_fn(c, ret);
+ return ret;
+}
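The grouping rule: keys found at the same position (same inode, for the inodes btree) contribute their snapshot IDs to one candidate list, and candidate lists that share any ID are merged, so keys seen in snapshots {3, 5} and {5, 8} imply that 3, 5 and 8 belong to one tree. Only missing nodes from single-node trees are then recreated; multi-node trees would need parent/depth information this pass cannot reconstruct, hence the fsck_repair_unimplemented error.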
+
/*
* Mark a snapshot as deleted, for future cleanup:
*/
@@ -921,7 +1091,7 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
int ret = 0;
s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
- BTREE_ITER_INTENT, snapshot);
+ BTREE_ITER_intent, snapshot);
ret = bkey_err(s);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
"missing snapshot %u", id);
@@ -1030,7 +1200,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
- POS_MIN, BTREE_ITER_INTENT);
+ POS_MIN, BTREE_ITER_intent);
k = bch2_btree_iter_peek(&iter);
ret = bkey_err(k);
if (ret)
@@ -1198,7 +1368,7 @@ static int snapshot_delete_key(struct btree_trans *trans,
if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
snapshot_list_has_id(equiv_seen, equiv)) {
return bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
} else {
return snapshot_list_add(c, equiv_seen, equiv);
}
@@ -1235,15 +1405,15 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
new->k.p.snapshot = equiv;
bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
- BTREE_ITER_ALL_SNAPSHOTS|
- BTREE_ITER_CACHED|
- BTREE_ITER_INTENT);
+ BTREE_ITER_all_snapshots|
+ BTREE_ITER_cached|
+ BTREE_ITER_intent);
ret = bch2_btree_iter_traverse(&new_iter) ?:
bch2_trans_update(trans, &new_iter, new,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ BTREE_UPDATE_internal_snapshot_node) ?:
bch2_btree_delete_at(trans, iter,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &new_iter);
if (ret)
return ret;
@@ -1434,12 +1604,12 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
ret = for_each_btree_key_commit(trans, iter,
id, POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
&res, NULL, BCH_TRANS_COMMIT_no_enospc,
snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
for_each_btree_key_commit(trans, iter,
id, POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
&res, NULL, BCH_TRANS_COMMIT_no_enospc,
move_key_to_correct_snapshot(trans, &iter, k));
@@ -1474,7 +1644,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
* nodes some depth fields will be off:
*/
ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
- BTREE_ITER_INTENT, k,
+ BTREE_ITER_intent, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
if (ret)
@@ -1530,8 +1700,8 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
int ret;
bch2_trans_iter_init(trans, &iter, id, pos,
- BTREE_ITER_NOT_EXTENTS|
- BTREE_ITER_ALL_SNAPSHOTS);
+ BTREE_ITER_not_extents|
+ BTREE_ITER_all_snapshots);
while (1) {
k = bch2_btree_iter_prev(&iter);
ret = bkey_err(k);
@@ -1583,7 +1753,7 @@ static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
pos.snapshot = leaf_id;
- bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
+ bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
@@ -1682,6 +1852,20 @@ int bch2_snapshots_read(struct bch_fs *c)
POS_MIN, 0, k,
(set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
bch_err_fn(c, ret);
+
+ /*
+ * It's important that we check whether we need to reconstruct snapshots
+ * before going RW, so we mark that pass as required in the superblock;
+ * otherwise we could end up deleting keys whose snapshot nodes are
+ * missing.
+ */
+ BUG_ON(!test_bit(BCH_FS_new_fs, &c->flags) &&
+ test_bit(BCH_FS_may_go_rw, &c->flags));
+
+ if (bch2_err_matches(ret, EIO) ||
+ (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_snapshots)))
+ ret = bch2_run_explicit_recovery_pass_persistent(c, BCH_RECOVERY_PASS_reconstruct_snapshots);
+
return ret;
}
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
index 7c66ffc06385..bd5d74269d15 100644
--- a/fs/bcachefs/snapshot.h
+++ b/fs/bcachefs/snapshot.h
@@ -2,11 +2,11 @@
#ifndef _BCACHEFS_SNAPSHOT_H
#define _BCACHEFS_SNAPSHOT_H
-enum bkey_invalid_flags;
+enum bch_validate_flags;
void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_snapshot_tree_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
#define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) { \
.key_invalid = bch2_snapshot_tree_invalid, \
@@ -20,9 +20,10 @@ int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tre
void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_snapshot_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_snapshot ((struct bkey_ops) { \
.key_invalid = bch2_snapshot_invalid, \
@@ -33,7 +34,11 @@ int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
{
- return &t->s[U32_MAX - id];
+ u32 idx = U32_MAX - id;
+
+ return likely(t && idx < t->nr)
+ ? &t->s[idx]
+ : NULL;
}
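__snapshot_t() now bounds-checks against the table's own ->nr and returns NULL for unknown IDs, and every accessor below degrades to 0 (or an error) instead of reading out of bounds. A sketch of the resulting caller pattern (snapshot_parent_or_zero is a hypothetical wrapper, equivalent to the accessors in this header):

static inline u32 snapshot_parent_or_zero(struct bch_fs *c, u32 id)
{
	rcu_read_lock();
	const struct snapshot_t *s = snapshot_t(c, id);
	u32 ret = s ? s->parent : 0;
	rcu_read_unlock();
	return ret;
}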
static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
@@ -44,7 +49,8 @@ static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
{
rcu_read_lock();
- id = snapshot_t(c, id)->tree;
+ const struct snapshot_t *s = snapshot_t(c, id);
+ id = s ? s->tree : 0;
rcu_read_unlock();
return id;
@@ -52,7 +58,8 @@ static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
{
- return snapshot_t(c, id)->parent;
+ const struct snapshot_t *s = snapshot_t(c, id);
+ return s ? s->parent : 0;
}
static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
@@ -66,19 +73,19 @@ static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
-#ifdef CONFIG_BCACHEFS_DEBUG
- u32 parent = snapshot_t(c, id)->parent;
+ const struct snapshot_t *s = snapshot_t(c, id);
+ if (!s)
+ return 0;
- if (parent &&
- snapshot_t(c, id)->depth != snapshot_t(c, parent)->depth + 1)
+ u32 parent = s->parent;
+ if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
+ parent &&
+ s->depth != snapshot_t(c, parent)->depth + 1)
panic("id %u depth=%u parent %u depth=%u\n",
id, snapshot_t(c, id)->depth,
parent, snapshot_t(c, parent)->depth);
return parent;
-#else
- return snapshot_t(c, id)->parent;
-#endif
}
static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
@@ -116,7 +123,8 @@ static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
static inline u32 __bch2_snapshot_equiv(struct bch_fs *c, u32 id)
{
- return snapshot_t(c, id)->equiv;
+ const struct snapshot_t *s = snapshot_t(c, id);
+ return s ? s->equiv : 0;
}
static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id)
@@ -128,43 +136,22 @@ static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id)
return id;
}
-static inline bool bch2_snapshot_is_equiv(struct bch_fs *c, u32 id)
+static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
{
- return id == bch2_snapshot_equiv(c, id);
-}
-
-static inline bool bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id)
-{
- const struct snapshot_t *s;
- bool ret;
-
rcu_read_lock();
- s = snapshot_t(c, id);
- ret = s->children[0];
+ const struct snapshot_t *s = snapshot_t(c, id);
+ int ret = s ? s->children[0] : -BCH_ERR_invalid_snapshot_node;
rcu_read_unlock();
return ret;
}
-static inline u32 bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
+static inline int bch2_snapshot_is_leaf(struct bch_fs *c, u32 id)
{
- return !bch2_snapshot_is_internal_node(c, id);
-}
-
-static inline u32 bch2_snapshot_sibling(struct bch_fs *c, u32 id)
-{
- const struct snapshot_t *s;
- u32 parent = __bch2_snapshot_parent(c, id);
-
- if (!parent)
- return 0;
-
- s = snapshot_t(c, __bch2_snapshot_parent(c, id));
- if (id == s->children[0])
- return s->children[1];
- if (id == s->children[1])
- return s->children[0];
- return 0;
+ int ret = bch2_snapshot_is_internal_node(c, id);
+ if (ret < 0)
+ return ret;
+ return !ret;
}
static inline u32 bch2_snapshot_depth(struct bch_fs *c, u32 parent)
@@ -218,15 +205,34 @@ static inline bool snapshot_list_has_ancestor(struct bch_fs *c, snapshot_id_list
static inline int snapshot_list_add(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
- int ret;
-
BUG_ON(snapshot_list_has_id(s, id));
- ret = darray_push(s, id);
+ int ret = darray_push(s, id);
if (ret)
bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
return ret;
}
+static inline int snapshot_list_add_nodup(struct bch_fs *c, snapshot_id_list *s, u32 id)
+{
+ int ret = snapshot_list_has_id(s, id)
+ ? 0
+ : darray_push(s, id);
+ if (ret)
+ bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
+ return ret;
+}
+
+static inline int snapshot_list_merge(struct bch_fs *c, snapshot_id_list *dst, snapshot_id_list *src)
+{
+ darray_for_each(*src, i) {
+ int ret = snapshot_list_add_nodup(c, dst, *i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
struct bch_snapshot *s);
int bch2_snapshot_get_subvol(struct btree_trans *, u32,
@@ -238,6 +244,7 @@ int bch2_snapshot_node_create(struct btree_trans *, u32,
int bch2_check_snapshot_trees(struct bch_fs *);
int bch2_check_snapshots(struct bch_fs *);
+int bch2_reconstruct_snapshots(struct bch_fs *);
int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
void bch2_delete_dead_snapshots_work(struct work_struct *);
@@ -249,7 +256,7 @@ static inline int bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
struct bpos pos)
{
if (!btree_type_has_snapshots(id) ||
- bch2_snapshot_is_leaf(trans->c, pos.snapshot))
+ bch2_snapshot_is_leaf(trans->c, pos.snapshot) > 0)
return 0;
return __bch2_key_has_snapshot_overwrites(trans, id, pos);
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 3976f80721bf..cbad9b27874f 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -15,16 +15,6 @@
#include <crypto/hash.h>
#include <crypto/sha2.h>
-typedef unsigned __bitwise bch_str_hash_flags_t;
-
-enum bch_str_hash_flags {
- __BCH_HASH_SET_MUST_CREATE,
- __BCH_HASH_SET_MUST_REPLACE,
-};
-
-#define BCH_HASH_SET_MUST_CREATE (__force bch_str_hash_flags_t) BIT(__BCH_HASH_SET_MUST_CREATE)
-#define BCH_HASH_SET_MUST_REPLACE (__force bch_str_hash_flags_t) BIT(__BCH_HASH_SET_MUST_REPLACE)
-
static inline enum bch_str_hash_type
bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
{
@@ -159,13 +149,14 @@ static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, s
desc.is_visible(inum, k));
}
-static __always_inline int
+static __always_inline struct bkey_s_c
bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
struct btree_iter *iter,
const struct bch_hash_desc desc,
const struct bch_hash_info *info,
subvol_inum inum, const void *key,
- unsigned flags, u32 snapshot)
+ enum btree_iter_update_trigger_flags flags,
+ u32 snapshot)
{
struct bkey_s_c k;
int ret;
@@ -173,10 +164,10 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
SPOS(inum.inum, desc.hash_key(info, key), snapshot),
POS(inum.inum, U64_MAX),
- BTREE_ITER_SLOTS|flags, k, ret) {
+ BTREE_ITER_slots|flags, k, ret) {
if (is_visible_key(desc, inum, k)) {
if (!desc.cmp_key(k, key))
- return 0;
+ return k;
} else if (k.k->type == KEY_TYPE_hash_whiteout) {
;
} else {
@@ -186,20 +177,23 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
}
bch2_trans_iter_exit(trans, iter);
- return ret ?: -BCH_ERR_ENOENT_str_hash_lookup;
+ return bkey_s_c_err(ret ?: -BCH_ERR_ENOENT_str_hash_lookup);
}
-static __always_inline int
+static __always_inline struct bkey_s_c
bch2_hash_lookup(struct btree_trans *trans,
struct btree_iter *iter,
const struct bch_hash_desc desc,
const struct bch_hash_info *info,
subvol_inum inum, const void *key,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
u32 snapshot;
- return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
- bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot);
+ int ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
+ if (ret)
+ return bkey_s_c_err(ret);
+
+ return bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot);
}
static __always_inline int
@@ -220,7 +214,7 @@ bch2_hash_hole(struct btree_trans *trans,
for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
SPOS(inum.inum, desc.hash_key(info, key), snapshot),
POS(inum.inum, U64_MAX),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret)
+ BTREE_ITER_slots|BTREE_ITER_intent, k, ret)
if (!is_visible_key(desc, inum, k))
return 0;
bch2_trans_iter_exit(trans, iter);
@@ -242,7 +236,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
bch2_btree_iter_advance(&iter);
- for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, k, ret) {
+ for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
if (k.k->type != desc.key_type &&
k.k->type != KEY_TYPE_hash_whiteout)
break;
@@ -264,8 +258,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
const struct bch_hash_info *info,
subvol_inum inum, u32 snapshot,
struct bkey_i *insert,
- bch_str_hash_flags_t str_hash_flags,
- int update_flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct btree_iter iter, slot = { NULL };
struct bkey_s_c k;
@@ -277,7 +270,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
desc.hash_bkey(info, bkey_i_to_s_c(insert)),
snapshot),
POS(insert->k.p.inode, U64_MAX),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
+ BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
if (is_visible_key(desc, inum, k)) {
if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))
goto found;
@@ -286,8 +279,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
continue;
}
- if (!slot.path &&
- !(str_hash_flags & BCH_HASH_SET_MUST_REPLACE))
+ if (!slot.path && !(flags & STR_HASH_must_replace))
bch2_trans_copy_iter(&slot, &iter);
if (k.k->type != KEY_TYPE_hash_whiteout)
@@ -305,16 +297,16 @@ found:
found = true;
not_found:
- if (!found && (str_hash_flags & BCH_HASH_SET_MUST_REPLACE)) {
+ if (!found && (flags & STR_HASH_must_replace)) {
ret = -BCH_ERR_ENOENT_str_hash_set_must_replace;
- } else if (found && (str_hash_flags & BCH_HASH_SET_MUST_CREATE)) {
+ } else if (found && (flags & STR_HASH_must_create)) {
ret = -EEXIST;
} else {
if (!found && slot.path)
swap(iter, slot);
insert->k.p = iter.pos;
- ret = bch2_trans_update(trans, &iter, insert, update_flags);
+ ret = bch2_trans_update(trans, &iter, insert, flags);
}
goto out;
@@ -326,14 +318,14 @@ int bch2_hash_set(struct btree_trans *trans,
const struct bch_hash_info *info,
subvol_inum inum,
struct bkey_i *insert,
- bch_str_hash_flags_t str_hash_flags)
+ enum btree_iter_update_trigger_flags flags)
{
insert->k.p.inode = inum.inum;
u32 snapshot;
return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
bch2_hash_set_in_snapshot(trans, desc, info, inum,
- snapshot, insert, str_hash_flags, 0);
+ snapshot, insert, flags);
}
static __always_inline
@@ -341,7 +333,7 @@ int bch2_hash_delete_at(struct btree_trans *trans,
const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct btree_iter *iter,
- unsigned update_flags)
+ enum btree_iter_update_trigger_flags flags)
{
struct bkey_i *delete;
int ret;
@@ -359,7 +351,7 @@ int bch2_hash_delete_at(struct btree_trans *trans,
delete->k.p = iter->pos;
delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted;
- return bch2_trans_update(trans, iter, delete, update_flags);
+ return bch2_trans_update(trans, iter, delete, flags);
}
static __always_inline
@@ -369,14 +361,10 @@ int bch2_hash_delete(struct btree_trans *trans,
subvol_inum inum, const void *key)
{
struct btree_iter iter;
- int ret;
-
- ret = bch2_hash_lookup(trans, &iter, desc, info, inum, key,
- BTREE_ITER_INTENT);
- if (ret)
- return ret;
-
- ret = bch2_hash_delete_at(trans, desc, info, &iter, 0);
+ struct bkey_s_c k = bch2_hash_lookup(trans, &iter, desc, info, inum, key,
+ BTREE_ITER_intent);
+ int ret = bkey_err(k) ?:
+ bch2_hash_delete_at(trans, desc, info, &iter, 0);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
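With these changes bch2_hash_lookup() returns the found key rather than an int, so callers get the key and the error from a single call; a caller-side sketch following the bkey_err() convention used in bch2_hash_delete() above:

	struct btree_iter iter;
	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, desc, info,
					     inum, key, 0);
	int ret = bkey_err(k);
	if (!ret) {
		/* k is the visible key whose hash matches `key` */
	}
	bch2_trans_iter_exit(trans, &iter);
	return ret;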
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index ce7aed121942..132213761ef6 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -162,7 +162,7 @@ int bch2_check_subvols(struct bch_fs *c)
{
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
- BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
+ BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_subvol(trans, &iter, k)));
bch_err_fn(c, ret);
@@ -198,7 +198,7 @@ int bch2_check_subvol_children(struct bch_fs *c)
{
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
- BTREE_ID_subvolume_children, POS_MIN, BTREE_ITER_PREFETCH, k,
+ BTREE_ID_subvolume_children, POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_subvol_child(trans, &iter, k)));
bch_err_fn(c, ret);
@@ -208,7 +208,7 @@ int bch2_check_subvol_children(struct bch_fs *c)
/* Subvolumes: */
int bch2_subvolume_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags, struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
int ret = 0;
@@ -245,9 +245,9 @@ static int subvolume_children_mod(struct btree_trans *trans, struct bpos pos, bo
int bch2_subvolume_trigger(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s new,
- unsigned flags)
+ enum btree_iter_update_trigger_flags flags)
{
- if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+ if (flags & BTREE_TRIGGER_transactional) {
struct bpos children_pos_old = subvolume_children_pos(old);
struct bpos children_pos_new = subvolume_children_pos(new.s_c);
@@ -333,7 +333,7 @@ int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,
subvol = bch2_bkey_get_iter_typed(trans, &iter,
BTREE_ID_subvolumes, POS(0, subvolid),
- BTREE_ITER_CACHED|BTREE_ITER_WITH_UPDATES,
+ BTREE_ITER_cached|BTREE_ITER_with_updates,
subvolume);
ret = bkey_err(subvol);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
@@ -383,9 +383,9 @@ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_d
return lockrestart_do(trans,
bch2_subvolume_get(trans, subvolid_to_delete, true,
- BTREE_ITER_CACHED, &s)) ?:
+ BTREE_ITER_cached, &s)) ?:
for_each_btree_key_commit(trans, iter,
- BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
+ BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_subvolume_reparent(trans, &iter, k,
subvolid_to_delete, le32_to_cpu(s.creation_parent)));
@@ -404,7 +404,7 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
subvol = bch2_bkey_get_iter_typed(trans, &iter,
BTREE_ID_subvolumes, POS(0, subvolid),
- BTREE_ITER_CACHED|BTREE_ITER_INTENT,
+ BTREE_ITER_cached|BTREE_ITER_intent,
subvolume);
ret = bkey_err(subvol);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
@@ -505,7 +505,7 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
n = bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_subvolumes, POS(0, subvolid),
- BTREE_ITER_CACHED, subvolume);
+ BTREE_ITER_cached, subvolume);
ret = PTR_ERR_OR_ZERO(n);
if (unlikely(ret)) {
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,
@@ -547,7 +547,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
BTREE_ID_subvolumes, POS(0, src_subvolid),
- BTREE_ITER_CACHED, subvolume);
+ BTREE_ITER_cached, subvolume);
ret = PTR_ERR_OR_ZERO(src_subvol);
if (unlikely(ret)) {
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
@@ -595,6 +595,78 @@ err:
return ret;
}
+int bch2_initialize_subvolumes(struct bch_fs *c)
+{
+ struct bkey_i_snapshot_tree root_tree;
+ struct bkey_i_snapshot root_snapshot;
+ struct bkey_i_subvolume root_volume;
+ int ret;
+
+ bkey_snapshot_tree_init(&root_tree.k_i);
+ root_tree.k.p.offset = 1;
+ root_tree.v.master_subvol = cpu_to_le32(1);
+ root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);
+
+ bkey_snapshot_init(&root_snapshot.k_i);
+ root_snapshot.k.p.offset = U32_MAX;
+ root_snapshot.v.flags = 0;
+ root_snapshot.v.parent = 0;
+ root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
+ root_snapshot.v.tree = cpu_to_le32(1);
+ SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
+
+ bkey_subvolume_init(&root_volume.k_i);
+ root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
+ root_volume.v.flags = 0;
+ root_volume.v.snapshot = cpu_to_le32(U32_MAX);
+ root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
+
+ ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
+ bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
+ bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
+ bch_err_fn(c, ret);
+ return ret;
+}
+
+static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct bch_inode_unpacked inode;
+ int ret;
+
+ k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+ SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
+
+ if (!bkey_is_inode(k.k)) {
+ bch_err(trans->c, "root inode not found");
+ ret = -BCH_ERR_ENOENT_inode;
+ goto err;
+ }
+
+ ret = bch2_inode_unpack(k, &inode);
+ BUG_ON(ret);
+
+ inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
+
+ ret = bch2_inode_write(trans, &iter, &inode);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ return ret;
+}
+
+/* set bi_subvol on root inode */
+int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
+{
+ int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
+ __bch2_fs_upgrade_for_subvolumes(trans));
+ bch_err_fn(c, ret);
+ return ret;
+}
+
int bch2_fs_subvolumes_init(struct bch_fs *c)
{
INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index 903c05162c06..afa5e871efb2 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -5,16 +5,17 @@
#include "darray.h"
#include "subvolume_types.h"
-enum bkey_invalid_flags;
+enum bch_validate_flags;
int bch2_check_subvols(struct bch_fs *);
int bch2_check_subvol_children(struct bch_fs *);
int bch2_subvolume_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_subvolume_trigger(struct btree_trans *, enum btree_id, unsigned,
- struct bkey_s_c, struct bkey_s, unsigned);
+ struct bkey_s_c, struct bkey_s,
+ enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_subvolume ((struct bkey_ops) { \
.key_invalid = bch2_subvolume_invalid, \
@@ -37,6 +38,9 @@ void bch2_delete_dead_snapshots_async(struct bch_fs *);
int bch2_subvolume_unlink(struct btree_trans *, u32);
int bch2_subvolume_create(struct btree_trans *, u64, u32, u32, u32 *, u32 *, bool);
+int bch2_initialize_subvolumes(struct bch_fs *);
+int bch2_fs_upgrade_for_subvolumes(struct bch_fs *);
+
int bch2_fs_subvolumes_init(struct bch_fs *);
#endif /* _BCACHEFS_SUBVOLUME_H */
diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h
index ae644adfc391..9b10c8947828 100644
--- a/fs/bcachefs/subvolume_types.h
+++ b/fs/bcachefs/subvolume_types.h
@@ -20,6 +20,8 @@ struct snapshot_t {
};
struct snapshot_table {
+ struct rcu_head rcu;
+ size_t nr;
#ifndef RUST_BINDGEN
DECLARE_FLEX_ARRAY(struct snapshot_t, s);
#else
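Keeping nr next to the RCU head lets readers bounds-check snapshot IDs against the live table size; a minimal sketch of the access pattern this enables (hypothetical helper, assuming c->snapshots is the RCU-protected pointer to this table):

	/* caller must hold rcu_read_lock() */
	static inline const struct snapshot_t *snapshot_t_or_null(struct bch_fs *c, u32 id)
	{
		const struct snapshot_table *t = rcu_dereference(c->snapshots);

		return t && id < t->nr ? &t->s[id] : NULL;
	}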
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index ad28e370b640..f1bee6c5222d 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -8,7 +8,7 @@
#include "journal.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
-#include "recovery.h"
+#include "recovery_passes.h"
#include "replicas.h"
#include "quota.h"
#include "sb-clean.h"
@@ -76,7 +76,7 @@ const char * const bch2_sb_fields[] = {
};
static int bch2_sb_field_validate(struct bch_sb *, struct bch_sb_field *,
- struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
struct bch_sb_field *bch2_sb_field_get_id(struct bch_sb *sb,
enum bch_sb_field_type type)
@@ -143,7 +143,7 @@ void bch2_free_super(struct bch_sb_handle *sb)
{
kfree(sb->bio);
if (!IS_ERR_OR_NULL(sb->s_bdev_file))
- fput(sb->s_bdev_file);
+ bdev_fput(sb->s_bdev_file);
kfree(sb->holder);
kfree(sb->sb_name);
@@ -232,7 +232,7 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
struct bch_sb_handle *dev_sb = &ca->disk_sb;
if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
- percpu_ref_put(&ca->ref);
+ percpu_ref_put(&ca->io_ref);
return NULL;
}
}
@@ -344,8 +344,8 @@ static int bch2_sb_compatible(struct bch_sb *sb, struct printbuf *out)
return 0;
}
-static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
- int rw)
+static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
+ enum bch_validate_flags flags, struct printbuf *out)
{
struct bch_sb *sb = disk_sb->sb;
struct bch_sb_field_members_v1 *mi;
@@ -401,7 +401,7 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
return -BCH_ERR_invalid_sb_time_precision;
}
- if (rw == READ) {
+ if (!flags) {
/*
* Been seeing a bug where these are getting inexplicably
* zeroed, so we're now validating them, but we have to be
@@ -457,7 +457,7 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
return -BCH_ERR_invalid_sb_members_missing;
}
- ret = bch2_sb_field_validate(sb, &mi->field, out);
+ ret = bch2_sb_field_validate(sb, &mi->field, flags, out);
if (ret)
return ret;
@@ -465,12 +465,12 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
if (le32_to_cpu(f->type) == BCH_SB_FIELD_members_v1)
continue;
- ret = bch2_sb_field_validate(sb, f, out);
+ ret = bch2_sb_field_validate(sb, f, flags, out);
if (ret)
return ret;
}
- if (rw == WRITE &&
+ if ((flags & BCH_VALIDATE_write) &&
bch2_sb_member_get(sb, sb->dev_idx).seq != sb->seq) {
prt_printf(out, "Invalid superblock: member seq %llu != sb seq %llu",
le64_to_cpu(bch2_sb_member_get(sb, sb->dev_idx).seq),
@@ -527,9 +527,11 @@ static void bch2_sb_update(struct bch_fs *c)
memset(c->sb.errors_silent, 0, sizeof(c->sb.errors_silent));
struct bch_sb_field_ext *ext = bch2_sb_field_get(src, ext);
- if (ext)
+ if (ext) {
le_bitvector_to_cpu(c->sb.errors_silent, (void *) ext->errors_silent,
sizeof(c->sb.errors_silent) * 8);
+ c->sb.btrees_lost_data = le64_to_cpu(ext->btrees_lost_data);
+ }
for_each_member_device(c, ca) {
struct bch_member m = bch2_sb_member_get(src, ca->dev_idx);
@@ -647,7 +649,7 @@ reread:
bytes = vstruct_bytes(sb->sb);
- if (bytes > 512 << sb->sb->layout.sb_max_size_bits) {
+ if (bytes > 512ULL << min(BCH_SB_LAYOUT_SIZE_BITS_MAX, sb->sb->layout.sb_max_size_bits)) {
prt_printf(err, "Invalid superblock: too big (got %zu bytes, layout max %lu)",
bytes, 512UL << sb->sb->layout.sb_max_size_bits);
return -BCH_ERR_invalid_sb_too_big;
@@ -698,8 +700,11 @@ retry:
return -ENOMEM;
sb->sb_name = kstrdup(path, GFP_KERNEL);
- if (!sb->sb_name)
- return -ENOMEM;
+ if (!sb->sb_name) {
+ ret = -ENOMEM;
+ prt_printf(&err, "error allocating memory for sb_name");
+ goto err;
+ }
#ifndef __KERNEL__
if (opt_get(*opts, direct_io) == false)
@@ -814,7 +819,7 @@ got_super:
sb->have_layout = true;
- ret = bch2_sb_validate(sb, &err, READ);
+ ret = bch2_sb_validate(sb, 0, &err);
if (ret) {
bch2_print_opts(opts, KERN_ERR "bcachefs (%s): error validating superblock: %s\n",
path, err.buf);
@@ -918,6 +923,7 @@ int bch2_write_super(struct bch_fs *c)
struct bch_devs_mask sb_written;
bool wrote, can_mount_without_written, can_mount_with_written;
unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
+ DARRAY(struct bch_dev *) online_devices = {};
int ret = 0;
trace_and_count(c, write_super, c, _RET_IP_);
@@ -930,6 +936,15 @@ int bch2_write_super(struct bch_fs *c)
closure_init_stack(cl);
memset(&sb_written, 0, sizeof(sb_written));
+ for_each_online_member(c, ca) {
+ ret = darray_push(&online_devices, ca);
+ if (bch2_fs_fatal_err_on(ret, c, "%s: error allocating online devices", __func__)) {
+ percpu_ref_put(&ca->io_ref);
+ goto out;
+ }
+ percpu_ref_get(&ca->io_ref);
+ }
+
/* Make sure we're using the new magic numbers: */
c->disk_sb.sb->magic = BCHFS_MAGIC;
c->disk_sb.sb->layout.magic = BCHFS_MAGIC;
@@ -937,8 +952,8 @@ int bch2_write_super(struct bch_fs *c)
le64_add_cpu(&c->disk_sb.sb->seq, 1);
struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
- for_each_online_member(c, ca)
- __bch2_members_v2_get_mut(mi, ca->dev_idx)->seq = c->disk_sb.sb->seq;
+ darray_for_each(online_devices, ca)
+ __bch2_members_v2_get_mut(mi, (*ca)->dev_idx)->seq = c->disk_sb.sb->seq;
c->disk_sb.sb->write_time = cpu_to_le64(ktime_get_real_seconds());
if (test_bit(BCH_FS_error, &c->flags))
@@ -954,16 +969,15 @@ int bch2_write_super(struct bch_fs *c)
bch2_sb_errors_from_cpu(c);
bch2_sb_downgrade_update(c);
- for_each_online_member(c, ca)
- bch2_sb_from_fs(c, ca);
+ darray_for_each(online_devices, ca)
+ bch2_sb_from_fs(c, (*ca));
- for_each_online_member(c, ca) {
+ darray_for_each(online_devices, ca) {
printbuf_reset(&err);
- ret = bch2_sb_validate(&ca->disk_sb, &err, WRITE);
+ ret = bch2_sb_validate(&(*ca)->disk_sb, BCH_VALIDATE_write, &err);
if (ret) {
bch2_fs_inconsistent(c, "sb invalid before write: %s", err.buf);
- percpu_ref_put(&ca->io_ref);
goto out;
}
}
@@ -990,53 +1004,67 @@ int bch2_write_super(struct bch_fs *c)
return -BCH_ERR_sb_not_downgraded;
}
- for_each_online_member(c, ca) {
- __set_bit(ca->dev_idx, sb_written.d);
- ca->sb_write_error = 0;
+ darray_for_each(online_devices, ca) {
+ __set_bit((*ca)->dev_idx, sb_written.d);
+ (*ca)->sb_write_error = 0;
}
- for_each_online_member(c, ca)
- read_back_super(c, ca);
+ darray_for_each(online_devices, ca)
+ read_back_super(c, *ca);
closure_sync(cl);
- for_each_online_member(c, ca) {
+ darray_for_each(online_devices, cap) {
+ struct bch_dev *ca = *cap;
+
if (ca->sb_write_error)
continue;
if (le64_to_cpu(ca->sb_read_scratch->seq) < ca->disk_sb.seq) {
- bch2_fs_fatal_error(c,
+ struct printbuf buf = PRINTBUF;
+ prt_char(&buf, ' ');
+ prt_bdevname(&buf, ca->disk_sb.bdev);
+ prt_printf(&buf,
": Superblock write was silently dropped! (seq %llu expected %llu)",
le64_to_cpu(ca->sb_read_scratch->seq),
ca->disk_sb.seq);
- percpu_ref_put(&ca->io_ref);
+ bch2_fs_fatal_error(c, "%s", buf.buf);
+ printbuf_exit(&buf);
ret = -BCH_ERR_erofs_sb_err;
- goto out;
}
if (le64_to_cpu(ca->sb_read_scratch->seq) > ca->disk_sb.seq) {
- bch2_fs_fatal_error(c,
+ struct printbuf buf = PRINTBUF;
+ prt_char(&buf, ' ');
+ prt_bdevname(&buf, ca->disk_sb.bdev);
+ prt_printf(&buf,
": Superblock modified by another process (seq %llu expected %llu)",
le64_to_cpu(ca->sb_read_scratch->seq),
ca->disk_sb.seq);
- percpu_ref_put(&ca->io_ref);
+ bch2_fs_fatal_error(c, "%s", buf.buf);
+ printbuf_exit(&buf);
ret = -BCH_ERR_erofs_sb_err;
- goto out;
}
}
+ if (ret)
+ goto out;
+
do {
wrote = false;
- for_each_online_member(c, ca)
+ darray_for_each(online_devices, cap) {
+ struct bch_dev *ca = *cap;
if (!ca->sb_write_error &&
sb < ca->disk_sb.sb->layout.nr_superblocks) {
write_one_super(c, ca, sb);
wrote = true;
}
+ }
closure_sync(cl);
sb++;
} while (wrote);
- for_each_online_member(c, ca) {
+ darray_for_each(online_devices, cap) {
+ struct bch_dev *ca = *cap;
if (ca->sb_write_error)
__clear_bit(ca->dev_idx, sb_written.d);
else
@@ -1072,6 +1100,9 @@ int bch2_write_super(struct bch_fs *c)
out:
/* Make new options visible after they're persistent: */
bch2_sb_update(c);
+ darray_for_each(online_devices, ca)
+ percpu_ref_put(&(*ca)->io_ref);
+ darray_exit(&online_devices);
printbuf_exit(&err);
return ret;
}
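The net effect above: the set of online devices is captured once into a darray, with an io_ref held per entry, so the multiple iteration passes in bch2_write_super() can no longer race with device removal. Distilled, the pattern is (sketch):

	DARRAY(struct bch_dev *) devs = {};

	for_each_online_member(c, ca) {
		if (darray_push(&devs, ca)) {
			percpu_ref_put(&ca->io_ref);	/* drop the iterator's ref */
			goto out;
		}
		percpu_ref_get(&ca->io_ref);		/* extra ref, owned by the darray */
	}
	/* ... iterate devs as often as needed; membership is stable ... */
out:
	darray_for_each(devs, i)
		percpu_ref_put(&(*i)->io_ref);
	darray_exit(&devs);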
@@ -1130,7 +1161,7 @@ void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version)
}
static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f,
- struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
if (vstruct_bytes(f) < 88) {
prt_printf(err, "field too small (%zu < %u)", vstruct_bytes(f), 88);
@@ -1145,8 +1176,7 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
{
struct bch_sb_field_ext *e = field_to_type(f, ext);
- prt_printf(out, "Recovery passes required:");
- prt_tab(out);
+ prt_printf(out, "Recovery passes required:\t");
prt_bitflags(out, bch2_recovery_passes,
bch2_recovery_passes_from_stable(le64_to_cpu(e->recovery_passes_required[0])));
prt_newline(out);
@@ -1155,13 +1185,16 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
if (errors_silent) {
le_bitvector_to_cpu(errors_silent, (void *) e->errors_silent, sizeof(e->errors_silent) * 8);
- prt_printf(out, "Errors to silently fix:");
- prt_tab(out);
+ prt_printf(out, "Errors to silently fix:\t");
prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent, sizeof(e->errors_silent) * 8);
prt_newline(out);
kfree(errors_silent);
}
+
+ prt_printf(out, "Btrees with missing data:\t");
+ prt_bitflags(out, __bch2_btree_ids, le64_to_cpu(e->btrees_lost_data));
+ prt_newline(out);
}
static const struct bch_sb_field_ops bch_sb_field_ops_ext = {
@@ -1186,14 +1219,14 @@ static const struct bch_sb_field_ops *bch2_sb_field_type_ops(unsigned type)
}
static int bch2_sb_field_validate(struct bch_sb *sb, struct bch_sb_field *f,
- struct printbuf *err)
+ enum bch_validate_flags flags, struct printbuf *err)
{
unsigned type = le32_to_cpu(f->type);
struct printbuf field_err = PRINTBUF;
const struct bch_sb_field_ops *ops = bch2_sb_field_type_ops(type);
int ret;
- ret = ops->validate ? ops->validate(sb, f, &field_err) : 0;
+ ret = ops->validate ? ops->validate(sb, f, flags, &field_err) : 0;
if (ret) {
prt_printf(err, "Invalid superblock section %s: %s",
bch2_sb_fields[type], field_err.buf);
@@ -1267,97 +1300,73 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
printbuf_tabstop_push(out, 44);
for (int i = 0; i < sb->nr_devices; i++)
- nr_devices += bch2_dev_exists(sb, i);
+ nr_devices += bch2_member_exists(sb, i);
- prt_printf(out, "External UUID:");
- prt_tab(out);
+ prt_printf(out, "External UUID:\t");
pr_uuid(out, sb->user_uuid.b);
prt_newline(out);
- prt_printf(out, "Internal UUID:");
- prt_tab(out);
+ prt_printf(out, "Internal UUID:\t");
pr_uuid(out, sb->uuid.b);
prt_newline(out);
- prt_printf(out, "Magic number:");
- prt_tab(out);
+ prt_printf(out, "Magic number:\t");
pr_uuid(out, sb->magic.b);
prt_newline(out);
- prt_str(out, "Device index:");
- prt_tab(out);
- prt_printf(out, "%u", sb->dev_idx);
- prt_newline(out);
+ prt_printf(out, "Device index:\t%u\n", sb->dev_idx);
- prt_str(out, "Label:");
- prt_tab(out);
+ prt_str(out, "Label:\t");
prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label);
prt_newline(out);
- prt_str(out, "Version:");
- prt_tab(out);
+ prt_str(out, "Version:\t");
bch2_version_to_text(out, le16_to_cpu(sb->version));
prt_newline(out);
- prt_str(out, "Version upgrade complete:");
- prt_tab(out);
+ prt_str(out, "Version upgrade complete:\t");
bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb));
prt_newline(out);
- prt_printf(out, "Oldest version on disk:");
- prt_tab(out);
+ prt_printf(out, "Oldest version on disk:\t");
bch2_version_to_text(out, le16_to_cpu(sb->version_min));
prt_newline(out);
- prt_printf(out, "Created:");
- prt_tab(out);
+ prt_printf(out, "Created:\t");
if (sb->time_base_lo)
bch2_prt_datetime(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
else
prt_printf(out, "(not set)");
prt_newline(out);
- prt_printf(out, "Sequence number:");
- prt_tab(out);
+ prt_printf(out, "Sequence number:\t");
prt_printf(out, "%llu", le64_to_cpu(sb->seq));
prt_newline(out);
- prt_printf(out, "Time of last write:");
- prt_tab(out);
+ prt_printf(out, "Time of last write:\t");
bch2_prt_datetime(out, le64_to_cpu(sb->write_time));
prt_newline(out);
- prt_printf(out, "Superblock size:");
- prt_tab(out);
+ prt_printf(out, "Superblock size:\t");
prt_units_u64(out, vstruct_bytes(sb));
prt_str(out, "/");
prt_units_u64(out, 512ULL << sb->layout.sb_max_size_bits);
prt_newline(out);
- prt_printf(out, "Clean:");
- prt_tab(out);
- prt_printf(out, "%llu", BCH_SB_CLEAN(sb));
- prt_newline(out);
-
- prt_printf(out, "Devices:");
- prt_tab(out);
- prt_printf(out, "%u", nr_devices);
- prt_newline(out);
+ prt_printf(out, "Clean:\t%llu\n", BCH_SB_CLEAN(sb));
+ prt_printf(out, "Devices:\t%u\n", nr_devices);
- prt_printf(out, "Sections:");
+ prt_printf(out, "Sections:\t");
vstruct_for_each(sb, f)
fields_have |= 1 << le32_to_cpu(f->type);
- prt_tab(out);
prt_bitflags(out, bch2_sb_fields, fields_have);
prt_newline(out);
- prt_printf(out, "Features:");
- prt_tab(out);
+ prt_printf(out, "Features:\t");
prt_bitflags(out, bch2_sb_features, le64_to_cpu(sb->features[0]));
prt_newline(out);
- prt_printf(out, "Compat features:");
- prt_tab(out);
+ prt_printf(out, "Compat features:\t");
prt_bitflags(out, bch2_sb_compat, le64_to_cpu(sb->compat[0]));
prt_newline(out);
@@ -1374,8 +1383,7 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
if (opt->get_sb != BCH2_NO_SB_OPT) {
u64 v = bch2_opt_from_sb(sb, id);
- prt_printf(out, "%s:", opt->attr.name);
- prt_tab(out);
+ prt_printf(out, "%s:\t", opt->attr.name);
bch2_opt_to_text(out, NULL, sb, opt, v,
OPT_HUMAN_READABLE|OPT_SHOW_FULL_LIST);
prt_newline(out);
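These prt_printf() conversions lean on bcachefs printbufs interpreting '\t' as advance-to-the-next-tabstop and '\r' as right-justify against it, subsuming the old prt_tab()/prt_tab_rjust() call pairs; e.g.:

	printbuf_tabstop_push(out, 32);
	prt_printf(out, "Sequence number:\t%llu\n", le64_to_cpu(sb->seq));
	/* replaces the old prt_printf() + prt_tab() + prt_printf() + prt_newline() sequence */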
diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h
index 95e80e06316b..fadd364e2802 100644
--- a/fs/bcachefs/super-io.h
+++ b/fs/bcachefs/super-io.h
@@ -51,7 +51,8 @@ void bch2_sb_field_delete(struct bch_sb_handle *, enum bch_sb_field_type);
extern const char * const bch2_sb_fields[];
struct bch_sb_field_ops {
- int (*validate)(struct bch_sb *, struct bch_sb_field *, struct printbuf *);
+ int (*validate)(struct bch_sb *, struct bch_sb_field *,
+ enum bch_validate_flags, struct printbuf *);
void (*to_text)(struct printbuf *, struct bch_sb *, struct bch_sb_field *);
};
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 1ad6e5cd9476..294a9d35a9f2 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -15,6 +15,7 @@
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
+#include "btree_node_scan.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_write_buffer.h"
@@ -263,7 +264,6 @@ static void __bch2_fs_read_only(struct bch_fs *c)
bch2_open_buckets_stop(c, NULL, true);
bch2_rebalance_stop(c);
bch2_copygc_stop(c);
- bch2_gc_thread_stop(c);
bch2_fs_ec_flush(c);
bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
@@ -284,11 +284,16 @@ static void __bch2_fs_read_only(struct bch_fs *c)
bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
journal_cur_seq(&c->journal));
- if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
+ if (test_bit(JOURNAL_replay_done, &c->journal.flags) &&
!test_bit(BCH_FS_emergency_ro, &c->flags))
set_bit(BCH_FS_clean_shutdown, &c->flags);
+
bch2_fs_journal_stop(&c->journal);
+ bch_info(c, "%sclean shutdown complete, journal seq %llu",

+ test_bit(BCH_FS_clean_shutdown, &c->flags) ? "" : "un",
+ c->journal.seq_ondisk);
+
/*
* After stopping journal:
*/
@@ -365,7 +370,7 @@ void bch2_fs_read_only(struct bch_fs *c)
!test_bit(BCH_FS_emergency_ro, &c->flags) &&
test_bit(BCH_FS_started, &c->flags) &&
test_bit(BCH_FS_clean_shutdown, &c->flags) &&
- !c->opts.norecovery) {
+ c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
BUG_ON(atomic_read(&c->btree_cache.dirty));
BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
@@ -461,7 +466,8 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
* overwriting whatever was there previously, and there must always be
* at least one non-flush write in the journal or recovery will fail:
*/
- set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
+ set_bit(JOURNAL_need_flush_write, &c->journal.flags);
+ set_bit(JOURNAL_running, &c->journal.flags);
for_each_rw_member(c, ca)
bch2_dev_allocator_add(c, ca);
@@ -479,12 +485,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
}
#endif
- ret = bch2_gc_thread_start(c);
- if (ret) {
- bch_err(c, "error starting gc thread");
- return ret;
- }
-
ret = bch2_journal_reclaim_start(&c->journal);
if (ret)
goto err;
@@ -510,7 +510,8 @@ err:
int bch2_fs_read_write(struct bch_fs *c)
{
- if (c->opts.norecovery)
+ if (c->opts.recovery_pass_last &&
+ c->opts.recovery_pass_last < BCH_RECOVERY_PASS_journal_replay)
return -BCH_ERR_erofs_norecovery;
if (c->opts.nochanges)
@@ -530,12 +531,12 @@ int bch2_fs_read_write_early(struct bch_fs *c)
static void __bch2_fs_free(struct bch_fs *c)
{
- unsigned i;
-
- for (i = 0; i < BCH_TIME_STAT_NR; i++)
+ for (unsigned i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
+ bch2_find_btree_nodes_exit(&c->found_btree_nodes);
bch2_free_pending_node_rewrites(c);
+ bch2_fs_allocator_background_exit(c);
bch2_fs_sb_errors_exit(c);
bch2_fs_counters_exit(c);
bch2_fs_snapshots_exit(c);
@@ -559,9 +560,11 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_io_clock_exit(&c->io_clock[READ]);
bch2_fs_compress_exit(c);
bch2_journal_keys_put_initial(c);
+ bch2_find_btree_nodes_exit(&c->found_btree_nodes);
BUG_ON(atomic_read(&c->journal_keys.ref));
bch2_fs_btree_write_buffer_exit(c);
percpu_free_rwsem(&c->mark_lock);
+ EBUG_ON(percpu_u64_get(c->online_reserved));
free_percpu(c->online_reserved);
darray_exit(&c->btree_roots_extra);
@@ -606,8 +609,6 @@ void __bch2_fs_stop(struct bch_fs *c)
set_bit(BCH_FS_stopping, &c->flags);
- cancel_work_sync(&c->journal_seq_blacklist_gc_work);
-
down_write(&c->state_lock);
bch2_fs_read_only(c);
up_write(&c->state_lock);
@@ -655,6 +656,7 @@ void bch2_fs_free(struct bch_fs *c)
struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
if (ca) {
+ EBUG_ON(atomic_long_read(&ca->ref) != 1);
bch2_free_super(&ca->disk_sb);
bch2_dev_free(ca);
}
@@ -709,7 +711,7 @@ static int bch2_fs_online(struct bch_fs *c)
ret = bch2_dev_sysfs_online(c, ca);
if (ret) {
bch_err(c, "error creating sysfs objects");
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
goto err;
}
}
@@ -768,6 +770,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_init(&c->times[i]);
+ bch2_fs_gc_init(c);
bch2_fs_copygc_init(c);
bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
bch2_fs_btree_iter_init_early(c);
@@ -790,16 +793,11 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
spin_lock_init(&c->btree_write_error_lock);
- INIT_WORK(&c->journal_seq_blacklist_gc_work,
- bch2_blacklist_entries_gc);
-
INIT_LIST_HEAD(&c->journal_iters);
INIT_LIST_HEAD(&c->fsck_error_msgs);
mutex_init(&c->fsck_error_msgs_lock);
- seqcount_init(&c->gc_pos_lock);
-
seqcount_init(&c->usage_lock);
sema_init(&c->io_in_flight, 128);
@@ -930,7 +928,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
goto err;
for (i = 0; i < c->sb.nr_devices; i++)
- if (bch2_dev_exists(c->disk_sb.sb, i) &&
+ if (bch2_member_exists(c->disk_sb.sb, i) &&
bch2_dev_alloc(c, i)) {
ret = -EEXIST;
goto err;
@@ -1015,8 +1013,16 @@ int bch2_fs_start(struct bch_fs *c)
for_each_online_member(c, ca)
bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);
+ struct bch_sb_field_ext *ext =
+ bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
mutex_unlock(&c->sb_lock);
+ if (!ext) {
+ bch_err(c, "insufficient space in superblock for sb_field_ext");
+ ret = -BCH_ERR_ENOSPC_sb;
+ goto err;
+ }
+
for_each_rw_member(c, ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
@@ -1083,7 +1089,7 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs,
if (!uuid_equal(&fs->sb->uuid, &sb->sb->uuid))
return -BCH_ERR_device_not_a_member_of_filesystem;
- if (!bch2_dev_exists(fs->sb, sb->sb->dev_idx))
+ if (!bch2_member_exists(fs->sb, sb->sb->dev_idx))
return -BCH_ERR_device_has_been_removed;
if (fs->sb->block_size != sb->sb->block_size)
@@ -1182,11 +1188,11 @@ static void bch2_dev_free(struct bch_dev *ca)
if (ca->kobj.state_in_sysfs)
kobject_del(&ca->kobj);
+ kfree(ca->buckets_nouse);
bch2_free_super(&ca->disk_sb);
bch2_dev_journal_exit(ca);
free_percpu(ca->io_done);
- bioset_exit(&ca->replica_set);
bch2_dev_buckets_free(ca);
free_page((unsigned long) ca->sb_read_scratch);
@@ -1194,7 +1200,9 @@ static void bch2_dev_free(struct bch_dev *ca)
bch2_time_stats_quantiles_exit(&ca->io_latency[READ]);
percpu_ref_exit(&ca->io_ref);
+#ifndef CONFIG_BCACHEFS_DEBUG
percpu_ref_exit(&ca->ref);
+#endif
kobject_put(&ca->kobj);
}
@@ -1221,12 +1229,14 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
bch2_dev_journal_exit(ca);
}
+#ifndef CONFIG_BCACHEFS_DEBUG
static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
struct bch_dev *ca = container_of(ref, struct bch_dev, ref);
complete(&ca->ref_completion);
}
+#endif
static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
{
@@ -1295,14 +1305,17 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
ca->mi.bucket_size / btree_sectors(c));
- if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete,
- 0, GFP_KERNEL) ||
- percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
+#ifndef CONFIG_BCACHEFS_DEBUG
+ if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete, 0, GFP_KERNEL))
+ goto err;
+#else
+ atomic_long_set(&ca->ref, 1);
+#endif
+
+ if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
!(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
bch2_dev_buckets_alloc(c, ca) ||
- bioset_init(&ca->replica_set, 4,
- offsetof(struct bch_write_bio, bio), 0) ||
!(ca->io_done = alloc_percpu(*ca->io_done)))
goto err;
@@ -1393,10 +1406,9 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
le64_to_cpu(c->disk_sb.sb->seq))
bch2_sb_to_fs(c, sb->sb);
- BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
- !c->devs[sb->sb->dev_idx]);
+ BUG_ON(!bch2_dev_exists(c, sb->sb->dev_idx));
- ca = bch_dev_locked(c, sb->sb->dev_idx);
+ ca = bch2_dev_locked(c, sb->sb->dev_idx);
ret = __bch2_dev_attach_bdev(ca, sb);
if (ret)
@@ -1488,10 +1500,10 @@ static bool bch2_fs_may_start(struct bch_fs *c)
mutex_lock(&c->sb_lock);
for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
- if (!bch2_dev_exists(c->disk_sb.sb, i))
+ if (!bch2_member_exists(c->disk_sb.sb, i))
continue;
- ca = bch_dev_locked(c, i);
+ ca = bch2_dev_locked(c, i);
if (!bch2_dev_is_online(ca) &&
(ca->mi.state == BCH_MEMBER_STATE_rw ||
@@ -1581,17 +1593,17 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
* with bch2_do_invalidates() and bch2_do_discards()
*/
ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
- BTREE_TRIGGER_NORUN, NULL) ?:
+ BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
- BTREE_TRIGGER_NORUN, NULL) ?:
+ BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
- BTREE_TRIGGER_NORUN, NULL) ?:
+ BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
- BTREE_TRIGGER_NORUN, NULL) ?:
+ BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
- BTREE_TRIGGER_NORUN, NULL) ?:
+ BTREE_TRIGGER_norun, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
- BTREE_TRIGGER_NORUN, NULL);
+ BTREE_TRIGGER_norun, NULL);
bch_err_msg(c, ret, "removing dev alloc info");
return ret;
}
@@ -1608,7 +1620,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
* We consume a reference to ca->ref, regardless of whether we succeed
* or fail:
*/
- percpu_ref_put(&ca->ref);
+ bch2_dev_put(ca);
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot remove without losing data");
@@ -1660,7 +1672,12 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
mutex_unlock(&c->sb_lock);
+#ifndef CONFIG_BCACHEFS_DEBUG
percpu_ref_kill(&ca->ref);
+#else
+ ca->dying = true;
+ bch2_dev_put(ca);
+#endif
wait_for_completion(&ca->ref_completion);
bch2_dev_free(ca);
@@ -1759,9 +1776,28 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
if (dynamic_fault("bcachefs:add:no_slot"))
goto no_slot;
- for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
- if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
- goto have_slot;
+ if (c->sb.nr_devices < BCH_SB_MEMBERS_MAX) {
+ dev_idx = c->sb.nr_devices;
+ goto have_slot;
+ }
+
+ int best = -1;
+ u64 best_last_mount = 0;
+ for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++) {
+ struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
+ if (bch2_member_alive(&m))
+ continue;
+
+ u64 last_mount = le64_to_cpu(m.last_mount);
+ if (best < 0 || last_mount < best_last_mount) {
+ best = dev_idx;
+ best_last_mount = last_mount;
+ }
+ }
+ if (best >= 0) {
+ dev_idx = best;
+ goto have_slot;
+ }
no_slot:
ret = -BCH_ERR_ENOSPC_sb_members;
bch_err_msg(c, ret, "setting up new superblock");
@@ -1803,7 +1839,7 @@ have_slot:
bch2_dev_usage_journal_reserve(c);
- ret = bch2_trans_mark_dev_sb(c, ca);
+ ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
bch_err_msg(ca, ret, "marking new superblock");
if (ret)
goto err_late;
@@ -1866,9 +1902,9 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
if (ret)
goto err;
- ca = bch_dev_locked(c, dev_idx);
+ ca = bch2_dev_locked(c, dev_idx);
- ret = bch2_trans_mark_dev_sb(c, ca);
+ ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
if (ret)
goto err;
@@ -1941,6 +1977,13 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
goto err;
}
+ if (nbuckets > BCH_MEMBER_NBUCKETS_MAX) {
+ bch_err(ca, "New device size too big (%llu greater than max %u)",
+ nbuckets, BCH_MEMBER_NBUCKETS_MAX);
+ ret = -BCH_ERR_device_size_too_big;
+ goto err;
+ }
+
if (bch2_dev_is_online(ca) &&
get_capacity(ca->disk_sb.bdev->bd_disk) <
ca->mi.bucket_size * nbuckets) {
@@ -1954,7 +1997,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
if (ret)
goto err;
- ret = bch2_trans_mark_dev_sb(c, ca);
+ ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
if (ret)
goto err;
@@ -1986,13 +2029,9 @@ err:
/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
{
- rcu_read_lock();
- for_each_member_device_rcu(c, ca, NULL)
- if (!strcmp(name, ca->name)) {
- rcu_read_unlock();
+ for_each_member_device(c, ca)
+ if (!strcmp(name, ca->name))
return ca;
- }
- rcu_read_unlock();
return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
}
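bch2_dev_lookup() still returns with a ref held (per the comment above it); the difference is that for_each_member_device() now manages that ref across the loop, and callers drop it with bch2_dev_put(). Caller-side sketch:

	struct bch_dev *ca = bch2_dev_lookup(c, name);
	int ret = PTR_ERR_OR_ZERO(ca);
	if (!ret) {
		/* ... use ca ... */
		bch2_dev_put(ca);
	}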
diff --git a/fs/bcachefs/super_types.h b/fs/bcachefs/super_types.h
index ec784d975f66..368a63d938cf 100644
--- a/fs/bcachefs/super_types.h
+++ b/fs/bcachefs/super_types.h
@@ -26,17 +26,4 @@ struct bch_devs_list {
u8 data[BCH_BKEY_PTRS_MAX];
};
-struct bch_member_cpu {
- u64 nbuckets; /* device size */
- u16 first_bucket; /* index of first bucket used */
- u16 bucket_size; /* sectors */
- u16 group;
- u8 state;
- u8 discard;
- u8 data_allowed;
- u8 durability;
- u8 freespace_initialized;
- u8 valid;
-};
-
#endif /* _BCACHEFS_SUPER_TYPES_H */
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index c86a93a8d8fc..93ca74d108b1 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -17,7 +17,6 @@
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
-#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
@@ -26,6 +25,7 @@
#include "ec.h"
#include "inode.h"
#include "journal.h"
+#include "journal_reclaim.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
@@ -139,9 +139,9 @@ do { \
write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
-write_attribute(prune_cache);
-write_attribute(btree_wakeup);
-rw_attribute(btree_gc_periodic);
+write_attribute(trigger_journal_flush);
+write_attribute(trigger_btree_cache_shrink);
+write_attribute(trigger_btree_key_cache_shrink);
rw_attribute(gc_gens_pos);
read_attribute(uuid);
@@ -166,7 +166,6 @@ read_attribute(btree_write_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
-read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
@@ -189,12 +188,8 @@ static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
bch2_printbuf_tabstop_push(out, 24);
- for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
- prt_str(out, bch2_write_refs[i]);
- prt_tab(out);
- prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
- prt_newline(out);
- }
+ for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++)
+ prt_printf(out, "%s\t%li\n", bch2_write_refs[i], atomic_long_read(&c->writes[i]));
}
#endif
@@ -278,7 +273,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
continue;
ret = for_each_btree_key(trans, iter, id, POS_MIN,
- BTREE_ITER_ALL_SNAPSHOTS, k, ({
+ BTREE_ITER_all_snapshots, k, ({
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
struct bch_extent_crc_unpacked crc;
const union bch_extent_entry *entry;
@@ -313,22 +308,11 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
if (ret)
return ret;
- prt_str(out, "type");
printbuf_tabstop_push(out, 12);
- prt_tab(out);
-
- prt_str(out, "compressed");
printbuf_tabstop_push(out, 16);
- prt_tab_rjust(out);
-
- prt_str(out, "uncompressed");
printbuf_tabstop_push(out, 16);
- prt_tab_rjust(out);
-
- prt_str(out, "average extent size");
printbuf_tabstop_push(out, 24);
- prt_tab_rjust(out);
- prt_newline(out);
+ prt_printf(out, "type\tcompressed\runcompressed\raverage extent size\r\n");
for (unsigned i = 0; i < ARRAY_SIZE(s); i++) {
bch2_prt_compression_type(out, i);
@@ -362,21 +346,6 @@ static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
prt_printf(out, "\n");
}
-static void bch2_btree_wakeup_all(struct bch_fs *c)
-{
- struct btree_trans *trans;
-
- seqmutex_lock(&c->btree_trans_lock);
- list_for_each_entry(trans, &c->btree_trans_list, list) {
- struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);
-
- if (b)
- six_lock_wakeup_all(&b->lock);
-
- }
- seqmutex_unlock(&c->btree_trans_lock);
-}
-
SHOW(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
@@ -392,8 +361,6 @@ SHOW(bch2_fs)
if (attr == &sysfs_btree_write_stats)
bch2_btree_write_stats_to_text(out, c);
- sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
-
if (attr == &sysfs_gc_gens_pos)
bch2_gc_gens_pos_to_text(out, c);
@@ -415,11 +382,8 @@ SHOW(bch2_fs)
if (attr == &sysfs_journal_debug)
bch2_journal_debug_to_text(out, &c->journal);
- if (attr == &sysfs_btree_updates)
- bch2_btree_updates_to_text(out, c);
-
if (attr == &sysfs_btree_cache)
- bch2_btree_cache_to_text(out, c);
+ bch2_btree_cache_to_text(out, &c->btree_cache);
if (attr == &sysfs_btree_key_cache)
bch2_btree_key_cache_to_text(out, &c->btree_key_cache);
@@ -462,6 +426,9 @@ SHOW(bch2_fs)
if (attr == &sysfs_disk_groups)
bch2_disk_groups_to_text(out, c);
+ if (attr == &sysfs_alloc_debug)
+ bch2_fs_alloc_debug_to_text(out, c);
+
return 0;
}
@@ -469,14 +436,6 @@ STORE(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
- if (attr == &sysfs_btree_gc_periodic) {
- ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
- ?: (ssize_t) size;
-
- wake_up_process(c->gc_thread);
- return ret;
- }
-
if (attr == &sysfs_copy_gc_enabled) {
ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
?: (ssize_t) size;
@@ -505,10 +464,10 @@ STORE(bch2_fs)
/* Debugging: */
- if (!test_bit(BCH_FS_rw, &c->flags))
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
return -EROFS;
- if (attr == &sysfs_prune_cache) {
+ if (attr == &sysfs_trigger_btree_cache_shrink) {
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
@@ -516,28 +475,28 @@ STORE(bch2_fs)
c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
}
- if (attr == &sysfs_btree_wakeup)
- bch2_btree_wakeup_all(c);
-
- if (attr == &sysfs_trigger_gc) {
- /*
- * Full gc is currently incompatible with btree key cache:
- */
-#if 0
- down_read(&c->state_lock);
- bch2_gc(c, false, false);
- up_read(&c->state_lock);
-#else
- bch2_gc_gens(c);
-#endif
+ if (attr == &sysfs_trigger_btree_key_cache_shrink) {
+ struct shrink_control sc;
+
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = strtoul_or_return(buf);
+ c->btree_key_cache.shrink->scan_objects(c->btree_key_cache.shrink, &sc);
}
+ if (attr == &sysfs_trigger_gc)
+ bch2_gc_gens(c);
+
if (attr == &sysfs_trigger_discards)
bch2_do_discards(c);
if (attr == &sysfs_trigger_invalidates)
bch2_do_invalidates(c);
+ if (attr == &sysfs_trigger_journal_flush) {
+ bch2_journal_flush_all_pins(&c->journal);
+ bch2_journal_meta(&c->journal);
+ }
+
#ifdef CONFIG_BCACHEFS_TESTS
if (attr == &sysfs_perf_test) {
char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
@@ -558,6 +517,7 @@ STORE(bch2_fs)
size = ret;
}
#endif
+ bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
return size;
}
SYSFS_OPS(bch2_fs);
@@ -591,13 +551,11 @@ SHOW(bch2_fs_counters)
if (attr == &sysfs_##t) { \
counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
- prt_printf(out, "since mount:"); \
- prt_tab(out); \
+ prt_printf(out, "since mount:\t"); \
prt_human_readable_u64(out, counter_since_mount); \
prt_newline(out); \
\
- prt_printf(out, "since filesystem creation:"); \
- prt_tab(out); \
+ prt_printf(out, "since filesystem creation:\t"); \
prt_human_readable_u64(out, counter); \
prt_newline(out); \
}
@@ -639,7 +597,6 @@ SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
&sysfs_flags,
&sysfs_journal_debug,
- &sysfs_btree_updates,
&sysfs_btree_cache,
&sysfs_btree_key_cache,
&sysfs_new_stripes,
@@ -657,8 +614,9 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_trigger_gc,
&sysfs_trigger_discards,
&sysfs_trigger_invalidates,
- &sysfs_prune_cache,
- &sysfs_btree_wakeup,
+ &sysfs_trigger_journal_flush,
+ &sysfs_trigger_btree_cache_shrink,
+ &sysfs_trigger_btree_key_cache_shrink,
&sysfs_gc_gens_pos,
@@ -674,6 +632,7 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_internal_uuid,
&sysfs_disk_groups,
+ &sysfs_alloc_debug,
NULL
};
@@ -789,88 +748,6 @@ struct attribute *bch2_fs_time_stats_files[] = {
NULL
};
-static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
-{
- struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch2_dev_usage_read(ca);
- unsigned i, nr[BCH_DATA_NR];
-
- memset(nr, 0, sizeof(nr));
-
- for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
- nr[c->open_buckets[i].data_type]++;
-
- printbuf_tabstop_push(out, 8);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
- printbuf_tabstop_push(out, 16);
-
- bch2_dev_usage_to_text(out, &stats);
-
- prt_newline(out);
-
- prt_printf(out, "reserves:");
- prt_newline(out);
- for (i = 0; i < BCH_WATERMARK_NR; i++) {
- prt_str(out, bch2_watermarks[i]);
- prt_tab(out);
- prt_u64(out, bch2_dev_buckets_reserved(ca, i));
- prt_tab_rjust(out);
- prt_newline(out);
- }
-
- prt_newline(out);
-
- printbuf_tabstops_reset(out);
- printbuf_tabstop_push(out, 24);
-
- prt_str(out, "freelist_wait");
- prt_tab(out);
- prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
- prt_newline(out);
-
- prt_str(out, "open buckets allocated");
- prt_tab(out);
- prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
- prt_newline(out);
-
- prt_str(out, "open buckets this dev");
- prt_tab(out);
- prt_u64(out, ca->nr_open_buckets);
- prt_newline(out);
-
- prt_str(out, "open buckets total");
- prt_tab(out);
- prt_u64(out, OPEN_BUCKETS_COUNT);
- prt_newline(out);
-
- prt_str(out, "open_buckets_wait");
- prt_tab(out);
- prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
- prt_newline(out);
-
- prt_str(out, "open_buckets_btree");
- prt_tab(out);
- prt_u64(out, nr[BCH_DATA_btree]);
- prt_newline(out);
-
- prt_str(out, "open_buckets_user");
- prt_tab(out);
- prt_u64(out, nr[BCH_DATA_user]);
- prt_newline(out);
-
- prt_str(out, "buckets_to_invalidate");
- prt_tab(out);
- prt_u64(out, should_invalidate_buckets(ca, stats));
- prt_newline(out);
-
- prt_str(out, "btree reserve cache");
- prt_tab(out);
- prt_u64(out, c->btree_reserve_cache_nr);
- prt_newline(out);
-}
-
static const char * const bch2_rw[] = {
"read",
"write",
@@ -940,7 +817,7 @@ SHOW(bch2_dev)
* 100 / CONGESTED_MAX);
if (attr == &sysfs_alloc_debug)
- dev_alloc_debug_to_text(out, ca);
+ bch2_dev_alloc_debug_to_text(out, ca);
return 0;
}
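The STORE() path above swaps the bare BCH_FS_rw flag test for a write ref, so a concurrent read-only transition now waits for an in-flight trigger instead of racing with it; the guard shape is:

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
		return -EROFS;
	/* ... run the requested trigger ... */
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);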
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index b3fe9fc57747..68104b2056d9 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -40,7 +40,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
k.k.p.snapshot = U32_MAX;
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
ret = commit_do(trans, NULL, NULL, 0,
bch2_btree_iter_traverse(&iter) ?:
@@ -81,7 +81,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
k.k.p.snapshot = U32_MAX;
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
- BTREE_ITER_INTENT);
+ BTREE_ITER_intent);
ret = commit_do(trans, NULL, NULL, 0,
bch2_btree_iter_traverse(&iter) ?:
@@ -261,7 +261,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- BTREE_ITER_SLOTS, k, ({
+ BTREE_ITER_slots, k, ({
if (i >= nr * 2)
break;
@@ -322,7 +322,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
ret = bch2_trans_run(c,
for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
- BTREE_ITER_SLOTS, k, ({
+ BTREE_ITER_slots, k, ({
if (i == nr)
break;
BUG_ON(bkey_deleted(k.k) != !(i % 16));
@@ -452,7 +452,7 @@ static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start,
ret = bch2_trans_do(c, NULL, NULL, 0,
bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
- BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
+ BTREE_UPDATE_internal_snapshot_node));
bch_err_fn(c, ret);
return ret;
}
@@ -671,8 +671,8 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
- BTREE_ITER_INTENT);
- k = bch2_btree_iter_peek(&iter);
+ BTREE_ITER_intent);
+ k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX));
ret = bkey_err(k);
if (ret)
goto err;
@@ -714,7 +714,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
return bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
+ BTREE_ITER_slots|BTREE_ITER_intent, k,
NULL, NULL, 0, ({
if (iter.pos.offset >= nr)
break;
@@ -737,7 +737,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
return bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX),
- BTREE_ITER_INTENT, k,
+ BTREE_ITER_intent, k,
NULL, NULL, 0, ({
struct bkey_i_cookie u;
diff --git a/fs/bcachefs/thread_with_file.c b/fs/bcachefs/thread_with_file.c
index 940db15d6a93..b1af7ac430f6 100644
--- a/fs/bcachefs/thread_with_file.c
+++ b/fs/bcachefs/thread_with_file.c
@@ -294,16 +294,27 @@ static int thread_with_stdio_fn(void *arg)
return 0;
}
-int bch2_run_thread_with_stdio(struct thread_with_stdio *thr,
- const struct thread_with_stdio_ops *ops)
+void bch2_thread_with_stdio_init(struct thread_with_stdio *thr,
+ const struct thread_with_stdio_ops *ops)
{
stdio_buf_init(&thr->stdio.input);
stdio_buf_init(&thr->stdio.output);
thr->ops = ops;
+}
+int __bch2_run_thread_with_stdio(struct thread_with_stdio *thr)
+{
return bch2_run_thread_with_file(&thr->thr, &thread_with_stdio_fops, thread_with_stdio_fn);
}
+int bch2_run_thread_with_stdio(struct thread_with_stdio *thr,
+ const struct thread_with_stdio_ops *ops)
+{
+ bch2_thread_with_stdio_init(thr, ops);
+
+ return __bch2_run_thread_with_stdio(thr);
+}
+
int bch2_run_thread_with_stdout(struct thread_with_stdio *thr,
const struct thread_with_stdio_ops *ops)
{
diff --git a/fs/bcachefs/thread_with_file.h b/fs/bcachefs/thread_with_file.h
index af54ea8f5b0f..1d63d14d7dca 100644
--- a/fs/bcachefs/thread_with_file.h
+++ b/fs/bcachefs/thread_with_file.h
@@ -63,6 +63,9 @@ struct thread_with_stdio {
const struct thread_with_stdio_ops *ops;
};
+void bch2_thread_with_stdio_init(struct thread_with_stdio *,
+ const struct thread_with_stdio_ops *);
+int __bch2_run_thread_with_stdio(struct thread_with_stdio *);
int bch2_run_thread_with_stdio(struct thread_with_stdio *,
const struct thread_with_stdio_ops *);
int bch2_run_thread_with_stdout(struct thread_with_stdio *,
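Splitting init from run presumably lets a caller finish setting up its containing object between buffer initialization and kthread creation; usage sketch with the declarations above (my_ops standing in for a real ops table):

	bch2_thread_with_stdio_init(&thr, &my_ops);
	/* ... initialize other fields of the structure embedding thr ... */
	int ret = __bch2_run_thread_with_stdio(&thr);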
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 6aa81d1e6d36..362e1fc7ef6a 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -638,99 +638,14 @@ DEFINE_EVENT(bch_fs, gc_gens_end,
/* Allocator */
-DECLARE_EVENT_CLASS(bucket_alloc,
- TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
- u64 bucket,
- u64 free,
- u64 avail,
- u64 copygc_wait_amount,
- s64 copygc_waiting_for,
- struct bucket_alloc_state *s,
- bool nonblocking,
- const char *err),
- TP_ARGS(ca, alloc_reserve, bucket, free, avail,
- copygc_wait_amount, copygc_waiting_for,
- s, nonblocking, err),
-
- TP_STRUCT__entry(
- __field(u8, dev )
- __array(char, reserve, 16 )
- __field(u64, bucket )
- __field(u64, free )
- __field(u64, avail )
- __field(u64, copygc_wait_amount )
- __field(s64, copygc_waiting_for )
- __field(u64, seen )
- __field(u64, open )
- __field(u64, need_journal_commit )
- __field(u64, nouse )
- __field(bool, nonblocking )
- __field(u64, nocow )
- __array(char, err, 32 )
- ),
-
- TP_fast_assign(
- __entry->dev = ca->dev_idx;
- strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
- __entry->bucket = bucket;
- __entry->free = free;
- __entry->avail = avail;
- __entry->copygc_wait_amount = copygc_wait_amount;
- __entry->copygc_waiting_for = copygc_waiting_for;
- __entry->seen = s->buckets_seen;
- __entry->open = s->skipped_open;
- __entry->need_journal_commit = s->skipped_need_journal_commit;
- __entry->nouse = s->skipped_nouse;
- __entry->nonblocking = nonblocking;
- __entry->nocow = s->skipped_nocow;
- strscpy(__entry->err, err, sizeof(__entry->err));
- ),
-
- TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
- __entry->reserve,
- __entry->dev,
- __entry->bucket,
- __entry->free,
- __entry->avail,
- __entry->copygc_wait_amount,
- __entry->copygc_waiting_for,
- __entry->seen,
- __entry->open,
- __entry->need_journal_commit,
- __entry->nouse,
- __entry->nocow,
- __entry->nonblocking,
- __entry->err)
+DEFINE_EVENT(fs_str, bucket_alloc,
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
-DEFINE_EVENT(bucket_alloc, bucket_alloc,
- TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
- u64 bucket,
- u64 free,
- u64 avail,
- u64 copygc_wait_amount,
- s64 copygc_waiting_for,
- struct bucket_alloc_state *s,
- bool nonblocking,
- const char *err),
- TP_ARGS(ca, alloc_reserve, bucket, free, avail,
- copygc_wait_amount, copygc_waiting_for,
- s, nonblocking, err)
-);
-
-DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
- TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
- u64 bucket,
- u64 free,
- u64 avail,
- u64 copygc_wait_amount,
- s64 copygc_waiting_for,
- struct bucket_alloc_state *s,
- bool nonblocking,
- const char *err),
- TP_ARGS(ca, alloc_reserve, bucket, free, avail,
- copygc_wait_amount, copygc_waiting_for,
- s, nonblocking, err)
+DEFINE_EVENT(fs_str, bucket_alloc_fail,
+ TP_PROTO(struct bch_fs *c, const char *str),
+ TP_ARGS(c, str)
);
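With the fs_str class the event payload is just a preformatted string, so callers build it with a printbuf and the field set is no longer frozen into the event definition; a caller-side sketch (field names illustrative, not from this patch):

	if (trace_bucket_alloc_fail_enabled()) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, "reserve %s free %llu avail %llu err %s",
			   watermark, free, avail, err);
		trace_bucket_alloc_fail(c, buf.buf);
		printbuf_exit(&buf);
	}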
TRACE_EVENT(discard_buckets,
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 216fadf16928..de331dec2a99 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -348,15 +348,12 @@ static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
{
const struct time_unit *u = bch2_pick_time_units(ns);
- prt_printf(out, "%llu ", div64_u64(ns, u->nsecs));
- prt_tab_rjust(out);
- prt_printf(out, "%s", u->name);
+ prt_printf(out, "%llu \r%s", div64_u64(ns, u->nsecs), u->name);
}
static inline void pr_name_and_units(struct printbuf *out, const char *name, u64 ns)
{
- prt_str(out, name);
- prt_tab(out);
+ prt_printf(out, "%s\t", name);
bch2_pr_time_units_aligned(out, ns);
prt_newline(out);
}
@@ -389,12 +386,8 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
}
printbuf_tabstop_push(out, out->indent + TABSTOP_SIZE);
- prt_printf(out, "count:");
- prt_tab(out);
- prt_printf(out, "%llu ",
- stats->duration_stats.n);
+ prt_printf(out, "count:\t%llu\n", stats->duration_stats.n);
printbuf_tabstop_pop(out);
- prt_newline(out);
printbuf_tabstops_reset(out);
@@ -403,13 +396,8 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
printbuf_tabstop_push(out, 0);
printbuf_tabstop_push(out, TABSTOP_SIZE + 2);
- prt_tab(out);
- prt_printf(out, "since mount");
- prt_tab_rjust(out);
- prt_tab(out);
+ prt_printf(out, "\tsince mount\r\trecent\r\n");
prt_printf(out, "recent");
- prt_tab_rjust(out);
- prt_newline(out);
printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, out->indent + 20);
@@ -417,23 +405,20 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
printbuf_tabstop_push(out, 2);
printbuf_tabstop_push(out, TABSTOP_SIZE);
- prt_printf(out, "duration of events");
- prt_newline(out);
+ prt_printf(out, "duration of events\n");
printbuf_indent_add(out, 2);
pr_name_and_units(out, "min:", stats->min_duration);
pr_name_and_units(out, "max:", stats->max_duration);
pr_name_and_units(out, "total:", stats->total_duration);
- prt_printf(out, "mean:");
- prt_tab(out);
+ prt_printf(out, "mean:\t");
bch2_pr_time_units_aligned(out, d_mean);
prt_tab(out);
bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->duration_stats_weighted, TIME_STATS_MV_WEIGHT));
prt_newline(out);
- prt_printf(out, "stddev:");
- prt_tab(out);
+ prt_printf(out, "stddev:\t");
bch2_pr_time_units_aligned(out, d_stddev);
prt_tab(out);
bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->duration_stats_weighted, TIME_STATS_MV_WEIGHT));
@@ -441,22 +426,19 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
printbuf_indent_sub(out, 2);
prt_newline(out);
- prt_printf(out, "time between events");
- prt_newline(out);
+ prt_printf(out, "time between events\n");
printbuf_indent_add(out, 2);
pr_name_and_units(out, "min:", stats->min_freq);
pr_name_and_units(out, "max:", stats->max_freq);
- prt_printf(out, "mean:");
- prt_tab(out);
+ prt_printf(out, "mean:\t");
bch2_pr_time_units_aligned(out, f_mean);
prt_tab(out);
bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT));
prt_newline(out);
- prt_printf(out, "stddev:");
- prt_tab(out);
+ prt_printf(out, "stddev:\t");
bch2_pr_time_units_aligned(out, f_stddev);
prt_tab(out);
bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT));
@@ -589,40 +571,31 @@ void bch2_pd_controller_debug_to_text(struct printbuf *out, struct bch_pd_contro
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 20);
- prt_printf(out, "rate:");
- prt_tab(out);
+ prt_printf(out, "rate:\t");
prt_human_readable_s64(out, pd->rate.rate);
prt_newline(out);
- prt_printf(out, "target:");
- prt_tab(out);
+ prt_printf(out, "target:\t");
prt_human_readable_u64(out, pd->last_target);
prt_newline(out);
- prt_printf(out, "actual:");
- prt_tab(out);
+ prt_printf(out, "actual:\t");
prt_human_readable_u64(out, pd->last_actual);
prt_newline(out);
- prt_printf(out, "proportional:");
- prt_tab(out);
+ prt_printf(out, "proportional:\t");
prt_human_readable_s64(out, pd->last_proportional);
prt_newline(out);
- prt_printf(out, "derivative:");
- prt_tab(out);
+ prt_printf(out, "derivative:\t");
prt_human_readable_s64(out, pd->last_derivative);
prt_newline(out);
- prt_printf(out, "change:");
- prt_tab(out);
+ prt_printf(out, "change:\t");
prt_human_readable_s64(out, pd->last_change);
prt_newline(out);
- prt_printf(out, "next io:");
- prt_tab(out);
- prt_printf(out, "%llims", div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC));
- prt_newline(out);
+ prt_printf(out, "next io:\t%llims\n", div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC));
}
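The conversions above depend on prt_printf() now interpreting control characters itself: '\t' advances to the next tabstop, '\r' right-justifies the text accumulated since the previous tabstop, and '\n' behaves like prt_newline(), so each prt_tab()/prt_tab_rjust()/prt_newline() call pair collapses into the format string. A minimal userspace model of that alignment behaviour (a hypothetical simplification, not the kernel printbuf code):

/* Userspace model of printbuf tabstops: '\t' pads left-justified up to the
 * next tabstop; '\r' shifts the current field right so it ends at the next
 * tabstop.  Illustrative only. */
#include <stdio.h>
#include <string.h>

static void emit_aligned(char *out, const char *fmt, const int *stops, int nstops)
{
	int col = 0, stop = 0, field_start = 0;
	char *o = out;

	for (const char *p = fmt; *p; p++) {
		if (*p == '\t' && stop < nstops) {
			while (col < stops[stop]) {
				*o++ = ' ';
				col++;
			}
			stop++;
			field_start = col;
		} else if (*p == '\r' && stop < nstops) {
			int len = col - field_start;
			int pad = stops[stop] - col;

			if (pad > 0) {
				/* right-justify the field at the tabstop */
				memmove(o - len + pad, o - len, len);
				memset(o - len, ' ', pad);
				o += pad;
				col += pad;
			}
			stop++;
			field_start = col;
		} else {
			*o++ = *p;
			col++;
		}
	}
	*o = '\0';
}

int main(void)
{
	int stops[] = { 8, 16 };
	char buf[128];

	emit_aligned(buf, "mean:\t42 \rms", stops, 2);
	printf("%s\n", buf);	/* prints "mean:        42 ms" */
	return 0;
}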
/* misc: */
@@ -707,149 +680,6 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
}
}
-static int alignment_ok(const void *base, size_t align)
-{
- return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
- ((unsigned long)base & (align - 1)) == 0;
-}
-
-static void u32_swap(void *a, void *b, size_t size)
-{
- u32 t = *(u32 *)a;
- *(u32 *)a = *(u32 *)b;
- *(u32 *)b = t;
-}
-
-static void u64_swap(void *a, void *b, size_t size)
-{
- u64 t = *(u64 *)a;
- *(u64 *)a = *(u64 *)b;
- *(u64 *)b = t;
-}
-
-static void generic_swap(void *a, void *b, size_t size)
-{
- char t;
-
- do {
- t = *(char *)a;
- *(char *)a++ = *(char *)b;
- *(char *)b++ = t;
- } while (--size > 0);
-}
-
-static inline int do_cmp(void *base, size_t n, size_t size,
- int (*cmp_func)(const void *, const void *, size_t),
- size_t l, size_t r)
-{
- return cmp_func(base + inorder_to_eytzinger0(l, n) * size,
- base + inorder_to_eytzinger0(r, n) * size,
- size);
-}
-
-static inline void do_swap(void *base, size_t n, size_t size,
- void (*swap_func)(void *, void *, size_t),
- size_t l, size_t r)
-{
- swap_func(base + inorder_to_eytzinger0(l, n) * size,
- base + inorder_to_eytzinger0(r, n) * size,
- size);
-}
-
-void eytzinger0_sort(void *base, size_t n, size_t size,
- int (*cmp_func)(const void *, const void *, size_t),
- void (*swap_func)(void *, void *, size_t))
-{
- int i, c, r;
-
- if (!swap_func) {
- if (size == 4 && alignment_ok(base, 4))
- swap_func = u32_swap;
- else if (size == 8 && alignment_ok(base, 8))
- swap_func = u64_swap;
- else
- swap_func = generic_swap;
- }
-
- /* heapify */
- for (i = n / 2 - 1; i >= 0; --i) {
- for (r = i; r * 2 + 1 < n; r = c) {
- c = r * 2 + 1;
-
- if (c + 1 < n &&
- do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
- c++;
-
- if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
- break;
-
- do_swap(base, n, size, swap_func, r, c);
- }
- }
-
- /* sort */
- for (i = n - 1; i > 0; --i) {
- do_swap(base, n, size, swap_func, 0, i);
-
- for (r = 0; r * 2 + 1 < i; r = c) {
- c = r * 2 + 1;
-
- if (c + 1 < i &&
- do_cmp(base, n, size, cmp_func, c, c + 1) < 0)
- c++;
-
- if (do_cmp(base, n, size, cmp_func, r, c) >= 0)
- break;
-
- do_swap(base, n, size, swap_func, r, c);
- }
- }
-}
-
-void sort_cmp_size(void *base, size_t num, size_t size,
- int (*cmp_func)(const void *, const void *, size_t),
- void (*swap_func)(void *, void *, size_t size))
-{
- /* pre-scale counters for performance */
- int i = (num/2 - 1) * size, n = num * size, c, r;
-
- if (!swap_func) {
- if (size == 4 && alignment_ok(base, 4))
- swap_func = u32_swap;
- else if (size == 8 && alignment_ok(base, 8))
- swap_func = u64_swap;
- else
- swap_func = generic_swap;
- }
-
- /* heapify */
- for ( ; i >= 0; i -= size) {
- for (r = i; r * 2 + size < n; r = c) {
- c = r * 2 + size;
- if (c < n - size &&
- cmp_func(base + c, base + c + size, size) < 0)
- c += size;
- if (cmp_func(base + r, base + c, size) >= 0)
- break;
- swap_func(base + r, base + c, size);
- }
- }
-
- /* sort */
- for (i = n - size; i > 0; i -= size) {
- swap_func(base, base + i, size);
- for (r = 0; r * 2 + size < i; r = c) {
- c = r * 2 + size;
- if (c < i - size &&
- cmp_func(base + c, base + c + size, size) < 0)
- c += size;
- if (cmp_func(base + r, base + c, size) >= 0)
- break;
- swap_func(base + r, base + c, size);
- }
- }
-}
-
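The two heapsort implementations deleted here were the only users of this file's swap and alignment helpers; the eytzinger variant presumably lives on beside the other eytzinger helpers (fs/bcachefs/eytzinger.c) rather than in util.c, and the generic sort_cmp_size() falls away entirely. The non-obvious part of the deleted code is that the array is stored in Eytzinger (breadth-first) order while the heapsort reasons about in-order ranks, hence the inorder_to_eytzinger0() translation on every compare and swap. A self-contained sketch of the layout itself (a naive recursive construction, not the kernel's O(1) index arithmetic):

/* Build an Eytzinger(0)-ordered array from a sorted one.  eytz[k]'s
 * children live at 2k+1 and 2k+2, so a binary search walks mostly
 * sequential cache lines instead of jumping around the sorted array. */
#include <stddef.h>
#include <stdio.h>

static void fill(const int *sorted, int *eytz, size_t n, size_t *i, size_t k)
{
	if (k >= n)
		return;
	fill(sorted, eytz, n, i, 2 * k + 1);	/* left subtree  */
	eytz[k] = sorted[(*i)++];		/* this node     */
	fill(sorted, eytz, n, i, 2 * k + 2);	/* right subtree */
}

int main(void)
{
	int sorted[] = { 1, 2, 3, 4, 5, 6, 7 };
	int eytz[7];
	size_t i = 0;

	fill(sorted, eytz, 7, &i, 0);
	for (size_t k = 0; k < 7; k++)
		printf("%d ", eytz[k]);	/* prints: 4 2 6 1 3 5 7 */
	printf("\n");
	return 0;
}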
#if 0
void eytzinger1_test(void)
{
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 175aee3074c7..5cf885b09986 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -631,10 +631,6 @@ static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
memset(s + bytes, c, rem);
}
-void sort_cmp_size(void *base, size_t num, size_t size,
- int (*cmp_func)(const void *, const void *, size_t),
- void (*swap_func)(void *, void *, size_t));
-
/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos) \
memmove(&(_array)[(_pos) + 1], \
@@ -792,9 +788,27 @@ static inline int copy_from_user_errcode(void *to, const void __user *from, unsi
#endif
+static inline void mod_bit(long nr, volatile unsigned long *addr, bool v)
+{
+ if (v)
+ set_bit(nr, addr);
+ else
+ clear_bit(nr, addr);
+}
+
static inline void __set_bit_le64(size_t bit, __le64 *addr)
{
addr[bit / 64] |= cpu_to_le64(BIT_ULL(bit % 64));
}
+static inline void __clear_bit_le64(size_t bit, __le64 *addr)
+{
+ addr[bit / 64] &= ~cpu_to_le64(BIT_ULL(bit % 64));
+}
+
+static inline bool test_bit_le64(size_t bit, __le64 *addr)
+{
+ return (addr[bit / 64] & cpu_to_le64(BIT_ULL(bit % 64))) != 0;
+}
+
#endif /* _BCACHEFS_UTIL_H */
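The new helpers manipulate bitmaps stored as arrays of little-endian 64-bit words, so on-disk bitmaps can be updated in place without byte-swapping whole words, and mod_bit() folds the set/clear choice into a single call keyed on a boolean. A userspace check of the __le64 variants; the macro stand-ins below assume a little-endian host (the real kernel macros are endian-correct everywhere):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Hypothetical userspace stand-ins for kernel macros, little-endian host only. */
typedef uint64_t __le64;
#define cpu_to_le64(x)	((__le64)(x))
#define BIT_ULL(n)	(1ULL << (n))

static inline void __set_bit_le64(size_t bit, __le64 *addr)
{
	addr[bit / 64] |= cpu_to_le64(BIT_ULL(bit % 64));
}

static inline void __clear_bit_le64(size_t bit, __le64 *addr)
{
	addr[bit / 64] &= ~cpu_to_le64(BIT_ULL(bit % 64));
}

static inline int test_bit_le64(size_t bit, __le64 *addr)
{
	return (addr[bit / 64] & cpu_to_le64(BIT_ULL(bit % 64))) != 0;
}

int main(void)
{
	__le64 map[2] = { 0, 0 };

	__set_bit_le64(70, map);		/* word 1, bit 6 */
	assert(test_bit_le64(70, map));
	__clear_bit_le64(70, map);
	assert(!test_bit_le64(70, map));
	puts("ok");
	return 0;
}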
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 754f17bba68e..c11bf6dacc2c 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -71,7 +71,7 @@ const struct bch_hash_desc bch2_xattr_hash_desc = {
};
int bch2_xattr_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bkey_invalid_flags flags,
+ enum bch_validate_flags flags,
struct printbuf *err)
{
struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
@@ -118,11 +118,17 @@ void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c,
else
prt_printf(out, "(unknown type %u)", xattr.v->x_type);
+ unsigned name_len = xattr.v->x_name_len;
+ unsigned val_len = le16_to_cpu(xattr.v->x_val_len);
+ unsigned max_name_val_bytes = bkey_val_bytes(xattr.k) -
+ offsetof(struct bch_xattr, x_name);
+
+ val_len = min_t(int, val_len, max_name_val_bytes - name_len);
+ name_len = min(name_len, max_name_val_bytes);
+
prt_printf(out, "%.*s:%.*s",
- xattr.v->x_name_len,
- xattr.v->x_name,
- le16_to_cpu(xattr.v->x_val_len),
- (char *) xattr_val(xattr.v));
+ name_len, xattr.v->x_name,
+ val_len, (char *) xattr_val(xattr.v));
if (xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS ||
xattr.v->x_type == KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT) {
@@ -138,21 +144,13 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
struct btree_iter iter;
- struct bkey_s_c_xattr xattr;
- struct bkey_s_c k;
- int ret;
-
- ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
- inode_inum(inode), &search, 0);
+ struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
+ inode_inum(inode), &search, 0);
+ int ret = bkey_err(k);
if (ret)
- goto err1;
-
- k = bch2_btree_iter_peek_slot(&iter);
- ret = bkey_err(k);
- if (ret)
- goto err2;
+ return ret;
- xattr = bkey_s_c_to_xattr(k);
+ struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
ret = le16_to_cpu(xattr.v->x_val_len);
if (buffer) {
if (ret > size)
@@ -160,10 +158,8 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
else
memcpy(buffer, xattr_val(xattr.v), ret);
}
-err2:
bch2_trans_iter_exit(trans, &iter);
-err1:
- return ret < 0 && bch2_err_matches(ret, ENOENT) ? -ENODATA : ret;
+ return ret;
}
int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
@@ -177,7 +173,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
int ret;
ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
- bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
+ bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent);
if (ret)
return ret;
@@ -212,8 +208,8 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
inum, &xattr->k_i,
- (flags & XATTR_CREATE ? BCH_HASH_SET_MUST_CREATE : 0)|
- (flags & XATTR_REPLACE ? BCH_HASH_SET_MUST_REPLACE : 0));
+ (flags & XATTR_CREATE ? STR_HASH_must_create : 0)|
+ (flags & XATTR_REPLACE ? STR_HASH_must_replace : 0));
} else {
struct xattr_search_key search =
X_SEARCH(type, name, strlen(name));
@@ -359,6 +355,9 @@ static int bch2_xattr_get_handler(const struct xattr_handler *handler,
int ret = bch2_trans_do(c, NULL, NULL, 0,
bch2_xattr_get_trans(trans, inode, name, buffer, size, handler->flags));
+ if (ret < 0 && bch2_err_matches(ret, ENOENT))
+ ret = -ENODATA;
+
return bch2_err_class(ret);
}
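With this refactor bch2_xattr_get_trans() returns raw (possibly bcachefs-private) error codes, and the ENOENT-to-ENODATA translation moves out to the VFS-facing handler above, keeping the transaction helper reusable from other call sites. A userspace sketch of that layering; the two bch2_err_* stubs are illustrative only, not the kernel implementations:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stubs: the real helpers understand bcachefs-private codes. */
static bool bch2_err_matches(int err, int class) { return -err == class; }
static int bch2_err_class(int err) { return err; }

static int xattr_get_handler(int trans_ret)
{
	/* A missing xattr is -ENODATA at the VFS boundary, not -ENOENT. */
	if (trans_ret < 0 && bch2_err_matches(trans_ret, ENOENT))
		trans_ret = -ENODATA;

	return bch2_err_class(trans_ret);
}

int main(void)
{
	printf("%d %d\n", xattr_get_handler(-ENOENT), -ENODATA);	/* equal */
	return 0;
}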
diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h
index 1337f31a5c49..1574b9eb4c85 100644
--- a/fs/bcachefs/xattr.h
+++ b/fs/bcachefs/xattr.h
@@ -7,7 +7,7 @@
extern const struct bch_hash_desc bch2_xattr_hash_desc;
int bch2_xattr_invalid(struct bch_fs *, struct bkey_s_c,
- enum bkey_invalid_flags, struct printbuf *);
+ enum bch_validate_flags, struct printbuf *);
void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_xattr ((struct bkey_ops) { \
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 5397b552fbeb..a43897b03ce9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1262,6 +1262,9 @@ out_free_interp:
if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
elf_ex->e_type == ET_DYN && !interpreter) {
mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
+ } else {
+ /* Otherwise leave a gap between .bss and brk. */
+ mm->brk = mm->start_brk = mm->brk + PAGE_SIZE;
}
mm->brk = mm->start_brk = arch_randomize_brk(mm);
@@ -1564,7 +1567,6 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
-#define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
* Format of NT_FILE note:
*
@@ -1592,8 +1594,12 @@ static int fill_files_note(struct memelfnote *note, struct coredump_params *cprm
names_ofs = (2 + 3 * count) * sizeof(data[0]);
alloc:
- if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
+ /* paranoia check */
+ if (size >= core_file_note_size_limit) {
+ pr_warn_once("coredump Note size too large: %u (does kernel.core_file_note_size_limit sysctl need adjustment?\n",
+ size);
return -EINVAL;
+ }
size = round_up(size, PAGE_SIZE);
/*
* "size" can be 0 here legitimately.
@@ -1928,7 +1934,7 @@ static void free_note_info(struct elf_note_info *info)
threads = t->next;
WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
for (i = 1; i < info->thread_notes; ++i)
- kfree(t->notes[i].data);
+ kvfree(t->notes[i].data);
kfree(t);
}
kfree(info->psinfo.data);
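The kfree()-to-kvfree() switch for per-thread note data suggests those buffers may now come from kvmalloc() elsewhere in this series (regset payloads such as x86 XSTATE can outgrow what kmalloc serves comfortably); kvfree() correctly frees either a kmalloc- or vmalloc-backed pointer. A kernel-context sketch of the pairing rule, illustrative rather than the dump code itself:

#include <linux/mm.h>
#include <linux/slab.h>

static void *alloc_note_buf(size_t size)
{
	/* May be kmalloc- or vmalloc-backed, depending on size. */
	return kvzalloc(size, GFP_KERNEL);
}

static void free_note_buf(void *buf)
{
	kvfree(buf);	/* kfree() here would be a bug for vmalloc-backed buffers */
}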
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 1920ed69279b..b799701454a9 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -505,8 +505,9 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
char *k_platform, *k_base_platform;
char __user *u_platform, *u_base_platform, *p;
int loop;
- int nr; /* reset for each csp adjustment */
unsigned long flags = 0;
+ int ei_index;
+ elf_addr_t *elf_info;
#ifdef CONFIG_MMU
/* In some cases (e.g. Hyper-Threading), we want to avoid L1 evictions
@@ -601,44 +602,24 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
csp -= sp & 15UL;
sp -= sp & 15UL;
- /* put the ELF interpreter info on the stack */
-#define NEW_AUX_ENT(id, val) \
- do { \
- struct { unsigned long _id, _val; } __user *ent, v; \
- \
- ent = (void __user *) csp; \
- v._id = (id); \
- v._val = (val); \
- if (copy_to_user(ent + nr, &v, sizeof(v))) \
- return -EFAULT; \
- nr++; \
+ /* Create the ELF interpreter info */
+ elf_info = (elf_addr_t *)mm->saved_auxv;
+ /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
+#define NEW_AUX_ENT(id, val) \
+ do { \
+ *elf_info++ = id; \
+ *elf_info++ = val; \
} while (0)
- nr = 0;
- csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_NULL, 0);
- if (k_platform) {
- nr = 0;
- csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_PLATFORM,
- (elf_addr_t) (unsigned long) u_platform);
- }
-
- if (k_base_platform) {
- nr = 0;
- csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_BASE_PLATFORM,
- (elf_addr_t) (unsigned long) u_base_platform);
- }
-
- if (bprm->have_execfd) {
- nr = 0;
- csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
- }
-
- nr = 0;
- csp -= DLINFO_ITEMS * 2 * sizeof(unsigned long);
+#ifdef ARCH_DLINFO
+ /*
+ * ARCH_DLINFO must come first so PPC can do its special alignment of
+ * AUXV.
+ * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
+ * ARCH_DLINFO changes
+ */
+ ARCH_DLINFO;
+#endif
NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
#ifdef ELF_HWCAP2
NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
@@ -659,17 +640,29 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
NEW_AUX_ENT(AT_EGID, (elf_addr_t) from_kgid_munged(cred->user_ns, cred->egid));
NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
NEW_AUX_ENT(AT_EXECFN, bprm->exec);
+ if (k_platform)
+ NEW_AUX_ENT(AT_PLATFORM,
+ (elf_addr_t)(unsigned long)u_platform);
+ if (k_base_platform)
+ NEW_AUX_ENT(AT_BASE_PLATFORM,
+ (elf_addr_t)(unsigned long)u_base_platform);
+ if (bprm->have_execfd)
+ NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
+#undef NEW_AUX_ENT
+ /* AT_NULL is zero; clear the rest too */
+ memset(elf_info, 0, (char *)mm->saved_auxv +
+ sizeof(mm->saved_auxv) - (char *)elf_info);
-#ifdef ARCH_DLINFO
- nr = 0;
- csp -= AT_VECTOR_SIZE_ARCH * 2 * sizeof(unsigned long);
+ /* And advance past the AT_NULL entry. */
+ elf_info += 2;
- /* ARCH_DLINFO must come last so platform specific code can enforce
- * special alignment requirements on the AUXV if necessary (eg. PPC).
- */
- ARCH_DLINFO;
-#endif
-#undef NEW_AUX_ENT
+ ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
+ csp -= ei_index * sizeof(elf_addr_t);
+
+ /* Put the elf_info on the stack in the right place. */
+ if (copy_to_user((void __user *)csp, mm->saved_auxv,
+ ei_index * sizeof(elf_addr_t)))
+ return -EFAULT;
/* allocate room for argv[] and envv[] */
csp -= (bprm->envc + 1) * sizeof(elf_caddr_t);
@@ -1359,7 +1352,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
rcu_read_unlock();
- strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
+ get_task_comm(psinfo->pr_fname, p);
return 0;
}
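The rewritten fdpic path stages the auxiliary vector in mm->saved_auxv the same way fs/binfmt_elf.c does: (id, value) pairs, an AT_NULL terminator produced by zeroing the tail, then a single copy_to_user() onto the stack. A userspace sketch of that staging discipline (buffer size and entries chosen purely for illustration):

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AUXV_WORDS 32	/* illustrative; the kernel sizes this from AT_VECTOR_SIZE */

int main(void)
{
	uint64_t auxv[AUXV_WORDS];
	uint64_t *p = auxv;

#define NEW_AUX_ENT(id, val) do { *p++ = (id); *p++ = (val); } while (0)
	NEW_AUX_ENT(AT_PAGESZ, 4096);
	NEW_AUX_ENT(AT_SECURE, 0);
#undef NEW_AUX_ENT

	/* AT_NULL is zero: clearing the tail terminates the vector. */
	memset(p, 0, (char *)(auxv + AUXV_WORDS) - (char *)p);
	p += 2;	/* advance past the AT_NULL entry, as the loader does */

	printf("%zu words staged\n", (size_t)(p - auxv));
	return 0;
}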
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index c1e6a5bbeeaf..a2de5c05f97c 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -261,7 +261,7 @@ static void update_share_count(struct share_check *sc, int oldcount,
else if (oldcount < 1 && newcount > 0)
sc->share_count++;
- if (newref->root_id == sc->root->root_key.objectid &&
+ if (newref->root_id == btrfs_root_id(sc->root) &&
newref->wanted_disk_byte == sc->data_bytenr &&
newref->key_for_search.objectid == sc->inum)
sc->self_ref_count += newref->count;
@@ -769,7 +769,7 @@ static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
continue;
}
- if (sc && ref->root_id != sc->root->root_key.objectid) {
+ if (sc && ref->root_id != btrfs_root_id(sc->root)) {
free_pref(ref);
ret = BACKREF_FOUND_SHARED;
goto out;
@@ -919,40 +919,38 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
switch (node->type) {
case BTRFS_TREE_BLOCK_REF_KEY: {
/* NORMAL INDIRECT METADATA backref */
- struct btrfs_delayed_tree_ref *ref;
struct btrfs_key *key_ptr = NULL;
+ /* The owner of a tree block ref is the level. */
+ int level = btrfs_delayed_ref_owner(node);
if (head->extent_op && head->extent_op->update_key) {
btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
key_ptr = &key;
}
- ref = btrfs_delayed_node_to_tree_ref(node);
- ret = add_indirect_ref(fs_info, preftrees, ref->root,
- key_ptr, ref->level + 1,
- node->bytenr, count, sc,
- GFP_ATOMIC);
+ ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
+ key_ptr, level + 1, node->bytenr,
+ count, sc, GFP_ATOMIC);
break;
}
case BTRFS_SHARED_BLOCK_REF_KEY: {
- /* SHARED DIRECT METADATA backref */
- struct btrfs_delayed_tree_ref *ref;
-
- ref = btrfs_delayed_node_to_tree_ref(node);
+ /*
+ * SHARED DIRECT METADATA backref
+ *
+ * The owner of a tree block ref is the level.
+ */
+ int level = btrfs_delayed_ref_owner(node);
- ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
- ref->parent, node->bytenr, count,
+ ret = add_direct_ref(fs_info, preftrees, level + 1,
+ node->parent, node->bytenr, count,
sc, GFP_ATOMIC);
break;
}
case BTRFS_EXTENT_DATA_REF_KEY: {
/* NORMAL INDIRECT DATA backref */
- struct btrfs_delayed_data_ref *ref;
- ref = btrfs_delayed_node_to_data_ref(node);
-
- key.objectid = ref->objectid;
+ key.objectid = btrfs_delayed_ref_owner(node);
key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = ref->offset;
+ key.offset = btrfs_delayed_ref_offset(node);
/*
* If we have a share check context and a reference for
@@ -972,18 +970,14 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
if (sc && count < 0)
sc->have_delayed_delete_refs = true;
- ret = add_indirect_ref(fs_info, preftrees, ref->root,
+ ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
&key, 0, node->bytenr, count, sc,
GFP_ATOMIC);
break;
}
case BTRFS_SHARED_DATA_REF_KEY: {
/* SHARED DIRECT FULL backref */
- struct btrfs_delayed_data_ref *ref;
-
- ref = btrfs_delayed_node_to_data_ref(node);
-
- ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
+ ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
node->bytenr, count, sc,
GFP_ATOMIC);
break;
@@ -2629,7 +2623,7 @@ static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
btrfs_debug(fs_root->fs_info,
"following ref at offset %u for inode %llu in tree %llu",
cur, found_key.objectid,
- fs_root->root_key.objectid);
+ btrfs_root_id(fs_root));
ret = inode_to_path(parent, name_len,
(unsigned long)(iref + 1), eb, ipath);
if (ret)
@@ -2776,20 +2770,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
size_t alloc_bytes;
alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
- data = kvmalloc(alloc_bytes, GFP_KERNEL);
+ data = kvzalloc(alloc_bytes, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
- if (total_bytes >= sizeof(*data)) {
+ if (total_bytes >= sizeof(*data))
data->bytes_left = total_bytes - sizeof(*data);
- data->bytes_missing = 0;
- } else {
+ else
data->bytes_missing = sizeof(*data) - total_bytes;
- data->bytes_left = 0;
- }
-
- data->elem_cnt = 0;
- data->elem_missed = 0;
return data;
}
@@ -3367,7 +3355,7 @@ static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
- cur->bytenr, level - 1, root->root_key.objectid,
+ cur->bytenr, level - 1, btrfs_root_id(root),
tree_key->objectid, tree_key->type, tree_key->offset);
btrfs_put_root(root);
ret = -ENOENT;
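add_delayed_refs() above no longer downcasts to btrfs_delayed_tree_ref or btrfs_delayed_data_ref; both are folded into btrfs_delayed_ref_node later in this series (see the delayed-ref.c hunks below), and the owner and offset come from accessors instead. Their real definitions live in fs/btrfs/delayed-ref.h and are not part of this diff; a plausible shape, inferred from the call sites:

/* Inferred from the unified btrfs_delayed_ref_node: for tree refs the
 * "owner" is the block level, for data refs it is the inode number
 * (objectid); only data refs carry an offset. */
static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
{
	if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
	    node->type == BTRFS_SHARED_DATA_REF_KEY)
		return node->data_ref.objectid;
	return node->tree_ref.level;
}

static inline u64 btrfs_delayed_ref_offset(struct btrfs_delayed_ref_node *node)
{
	ASSERT(node->type == BTRFS_EXTENT_DATA_REF_KEY ||
	       node->type == BTRFS_SHARED_DATA_REF_KEY);
	return node->data_ref.offset;
}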
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 5f7587ca1ca7..1e09aeea69c2 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1559,7 +1559,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* needing to allocate extents from the block group.
*/
used = btrfs_space_info_used(space_info, true);
- if (space_info->total_bytes - block_group->length < used) {
+ if (space_info->total_bytes - block_group->length < used &&
+ block_group->zone_unusable < block_group->length) {
/*
* Add a reference for the list, compensate for the ref
* drop under the "next" label for the
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index 95c174f9fd4f..b299b82d676e 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -341,9 +341,9 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
read_lock(&fs_info->global_root_lock);
rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
rb_node) {
- if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
- root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
- root->root_key.objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID ||
+ btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID ||
+ btrfs_root_id(root) == BTRFS_FREE_SPACE_TREE_OBJECTID) {
num_bytes += btrfs_root_used(&root->root_item);
min_items++;
}
@@ -406,7 +406,7 @@ void btrfs_init_root_block_rsv(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- switch (root->root_key.objectid) {
+ switch (btrfs_root_id(root)) {
case BTRFS_CSUM_TREE_OBJECTID:
case BTRFS_EXTENT_TREE_OBJECTID:
case BTRFS_FREE_SPACE_TREE_OBJECTID:
@@ -468,8 +468,7 @@ static struct btrfs_block_rsv *get_block_rsv(
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
(root == fs_info->uuid_root) ||
- (trans->adding_csums &&
- root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID))
+ (trans->adding_csums && btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID))
block_rsv = trans->block_rsv;
if (!block_rsv)
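The root->root_key.objectid to btrfs_root_id() churn running through all of these btrfs files hinges on one trivial accessor, defined in fs/btrfs/ctree.h, which to the best of my reading is simply:

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
	return root->root_key.objectid;
}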
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 100020ca4658..91c994b569f3 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -381,9 +381,11 @@ static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
}
/*
- * Should be called while holding the inode's VFS lock in exclusive mode or in a
- * context where no one else can access the inode concurrently (during inode
- * creation or when loading an inode from disk).
+ * Should be called while holding the inode's VFS lock in exclusive mode, or
+ * while holding the inode's mmap lock (struct btrfs_inode::i_mmap_lock) in
+ * either shared or exclusive mode, or in a context where no one else can access
+ * the inode concurrently (during inode creation or when loading an inode from
+ * disk).
*/
static inline void btrfs_set_inode_full_sync(struct btrfs_inode *inode)
{
@@ -496,7 +498,6 @@ void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
struct extent_state *orig, u64 split);
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
-vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
void btrfs_evict_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
@@ -544,6 +545,7 @@ ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
size_t done_before);
struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
size_t done_before);
+struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino);
extern const struct dentry_operations btrfs_dentry_operations;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index b2b94009959d..6441e47d8a5e 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -90,20 +90,20 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len)
}
static int compression_compress_pages(int type, struct list_head *ws,
- struct address_space *mapping, u64 start, struct page **pages,
- unsigned long *out_pages, unsigned long *total_in,
- unsigned long *total_out)
+ struct address_space *mapping, u64 start,
+ struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
switch (type) {
case BTRFS_COMPRESS_ZLIB:
- return zlib_compress_pages(ws, mapping, start, pages,
- out_pages, total_in, total_out);
+ return zlib_compress_folios(ws, mapping, start, folios,
+ out_folios, total_in, total_out);
case BTRFS_COMPRESS_LZO:
- return lzo_compress_pages(ws, mapping, start, pages,
- out_pages, total_in, total_out);
+ return lzo_compress_folios(ws, mapping, start, folios,
+ out_folios, total_in, total_out);
case BTRFS_COMPRESS_ZSTD:
- return zstd_compress_pages(ws, mapping, start, pages,
- out_pages, total_in, total_out);
+ return zstd_compress_folios(ws, mapping, start, folios,
+ out_folios, total_in, total_out);
case BTRFS_COMPRESS_NONE:
default:
/*
@@ -115,7 +115,7 @@ static int compression_compress_pages(int type, struct list_head *ws,
* Not a big deal, just need to inform caller that we
* haven't allocated any pages yet.
*/
- *out_pages = 0;
+ *out_folios = 0;
return -E2BIG;
}
}
@@ -158,11 +158,11 @@ static int compression_decompress(int type, struct list_head *ws,
}
}
-static void btrfs_free_compressed_pages(struct compressed_bio *cb)
+static void btrfs_free_compressed_folios(struct compressed_bio *cb)
{
- for (unsigned int i = 0; i < cb->nr_pages; i++)
- btrfs_free_compr_page(cb->compressed_pages[i]);
- kfree(cb->compressed_pages);
+ for (unsigned int i = 0; i < cb->nr_folios; i++)
+ btrfs_free_compr_folio(cb->compressed_folios[i]);
+ kfree(cb->compressed_folios);
}
static int btrfs_decompress_bio(struct compressed_bio *cb);
@@ -223,25 +223,25 @@ static unsigned long btrfs_compr_pool_scan(struct shrinker *sh, struct shrink_co
/*
* Common wrappers for page allocation from compression wrappers
*/
-struct page *btrfs_alloc_compr_page(void)
+struct folio *btrfs_alloc_compr_folio(void)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
spin_lock(&compr_pool.lock);
if (compr_pool.count > 0) {
- page = list_first_entry(&compr_pool.list, struct page, lru);
- list_del_init(&page->lru);
+ folio = list_first_entry(&compr_pool.list, struct folio, lru);
+ list_del_init(&folio->lru);
compr_pool.count--;
}
spin_unlock(&compr_pool.lock);
- if (page)
- return page;
+ if (folio)
+ return folio;
- return alloc_page(GFP_NOFS);
+ return folio_alloc(GFP_NOFS, 0);
}
-void btrfs_free_compr_page(struct page *page)
+void btrfs_free_compr_folio(struct folio *folio)
{
bool do_free = false;
@@ -249,7 +249,7 @@ void btrfs_free_compr_page(struct page *page)
if (compr_pool.count > compr_pool.thresh) {
do_free = true;
} else {
- list_add(&page->lru, &compr_pool.list);
+ list_add(&folio->lru, &compr_pool.list);
compr_pool.count++;
}
spin_unlock(&compr_pool.lock);
@@ -257,8 +257,8 @@ void btrfs_free_compr_page(struct page *page)
if (!do_free)
return;
- ASSERT(page_ref_count(page) == 1);
- put_page(page);
+ ASSERT(folio_ref_count(folio) == 1);
+ folio_put(folio);
}
static void end_bbio_comprssed_read(struct btrfs_bio *bbio)
@@ -269,7 +269,7 @@ static void end_bbio_comprssed_read(struct btrfs_bio *bbio)
if (!status)
status = errno_to_blk_status(btrfs_decompress_bio(cb));
- btrfs_free_compressed_pages(cb);
+ btrfs_free_compressed_folios(cb);
btrfs_bio_end_io(cb->orig_bbio, status);
bio_put(&bbio->bio);
}
@@ -323,7 +323,7 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work)
end_compressed_writeback(cb);
/* Note, our inode could be gone now */
- btrfs_free_compressed_pages(cb);
+ btrfs_free_compressed_folios(cb);
bio_put(&cb->bbio.bio);
}
@@ -342,17 +342,19 @@ static void end_bbio_comprssed_write(struct btrfs_bio *bbio)
queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}
-static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
+static void btrfs_add_compressed_bio_folios(struct compressed_bio *cb)
{
struct bio *bio = &cb->bbio.bio;
u32 offset = 0;
while (offset < cb->compressed_len) {
+ int ret;
u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);
/* Maximum compressed extent is smaller than bio size limit. */
- __bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
- len, 0);
+ ret = bio_add_folio(bio, cb->compressed_folios[offset >> PAGE_SHIFT],
+ len, 0);
+ ASSERT(ret);
offset += len;
}
}
@@ -367,8 +369,8 @@ static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
* the end io hooks.
*/
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
- struct page **compressed_pages,
- unsigned int nr_pages,
+ struct folio **compressed_folios,
+ unsigned int nr_folios,
blk_opf_t write_flags,
bool writeback)
{
@@ -384,14 +386,14 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
end_bbio_comprssed_write);
cb->start = ordered->file_offset;
cb->len = ordered->num_bytes;
- cb->compressed_pages = compressed_pages;
+ cb->compressed_folios = compressed_folios;
cb->compressed_len = ordered->disk_num_bytes;
cb->writeback = writeback;
INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
- cb->nr_pages = nr_pages;
+ cb->nr_folios = nr_folios;
cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
cb->bbio.ordered = ordered;
- btrfs_add_compressed_bio_pages(cb);
+ btrfs_add_compressed_bio_folios(cb);
btrfs_submit_bio(&cb->bbio, 0);
}
@@ -599,14 +601,14 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
free_extent_map(em);
- cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
- cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
- if (!cb->compressed_pages) {
+ cb->nr_folios = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
+ cb->compressed_folios = kcalloc(cb->nr_folios, sizeof(struct folio *), GFP_NOFS);
+ if (!cb->compressed_folios) {
ret = BLK_STS_RESOURCE;
goto out_free_bio;
}
- ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages, 0);
+ ret2 = btrfs_alloc_folio_array(cb->nr_folios, cb->compressed_folios, 0);
if (ret2) {
ret = BLK_STS_RESOURCE;
goto out_free_compressed_pages;
@@ -618,7 +620,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
/* include any pages we added in add_ra-bio_pages */
cb->len = bbio->bio.bi_iter.bi_size;
cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
- btrfs_add_compressed_bio_pages(cb);
+ btrfs_add_compressed_bio_folios(cb);
if (memstall)
psi_memstall_leave(&pflags);
@@ -627,7 +629,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
return;
out_free_compressed_pages:
- kfree(cb->compressed_pages);
+ kfree(cb->compressed_folios);
out_free_bio:
bio_put(&cb->bbio.bio);
out:
@@ -974,6 +976,29 @@ static unsigned int btrfs_compress_set_level(int type, unsigned level)
return level;
}
+/* Wrapper around find_get_page(), with extra error message. */
+int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
+ struct folio **in_folio_ret)
+{
+ struct folio *in_folio;
+
+ /*
+ * The compressed write path should have the folio locked already, thus
+ * we only need to grab one reference.
+ */
+ in_folio = filemap_get_folio(mapping, start >> PAGE_SHIFT);
+ if (IS_ERR(in_folio)) {
+ struct btrfs_inode *inode = BTRFS_I(mapping->host);
+
+ btrfs_crit(inode->root->fs_info,
+ "failed to get page cache, root %lld ino %llu file offset %llu",
+ btrfs_root_id(inode->root), btrfs_ino(inode), start);
+ return -ENOENT;
+ }
+ *in_folio_ret = in_folio;
+ return 0;
+}
+
/*
* Given an address space and start and length, compress the bytes into @pages
* that are allocated on demand.
@@ -994,11 +1019,9 @@ static unsigned int btrfs_compress_set_level(int type, unsigned level)
* @total_out is an in/out parameter, must be set to the input length and will
* be also used to return the total number of compressed bytes
*/
-int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
- u64 start, struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out)
+int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
int type = btrfs_compress_type(type_level);
int level = btrfs_compress_level(type_level);
@@ -1007,8 +1030,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
level = btrfs_compress_set_level(type, level);
workspace = get_workspace(type, level);
- ret = compression_compress_pages(type, workspace, mapping, start, pages,
- out_pages, total_in, total_out);
+ ret = compression_compress_pages(type, workspace, mapping, start, folios,
+ out_folios, total_in, total_out);
put_workspace(type, workspace);
return ret;
}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 4691a84ca838..c20c1a1b09d5 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -41,11 +41,11 @@ static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
#define BTRFS_ZLIB_DEFAULT_LEVEL 3
struct compressed_bio {
- /* Number of compressed pages in the array */
- unsigned int nr_pages;
+ /* Number of compressed folios in the array. */
+ unsigned int nr_folios;
- /* the pages with the compressed data on them */
- struct page **compressed_pages;
+ /* The folios with the compressed data on them. */
+ struct folio **compressed_folios;
/* starting offset in the inode for our pages */
u64 start;
@@ -85,27 +85,24 @@ static inline unsigned int btrfs_compress_level(unsigned int type_level)
int __init btrfs_init_compress(void);
void __cold btrfs_exit_compress(void);
-int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
- u64 start, struct page **pages,
- unsigned long *out_pages,
- unsigned long *total_in,
- unsigned long *total_out);
+int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out);
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
unsigned long start_byte, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
- struct page **compressed_pages,
- unsigned int nr_pages,
- blk_opf_t write_flags,
- bool writeback);
+ struct folio **compressed_folios,
+ unsigned int nr_folios, blk_opf_t write_flags,
+ bool writeback);
void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
-struct page *btrfs_alloc_compr_page(void);
-void btrfs_free_compr_page(struct page *page);
+struct folio *btrfs_alloc_compr_folio(void);
+void btrfs_free_compr_folio(struct folio *folio);
enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
@@ -149,8 +146,11 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len);
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
-int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
+int btrfs_compress_filemap_get_folio(struct address_space *mapping, u64 start,
+ struct folio **in_folio_ret);
+
+int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zlib_decompress(struct list_head *ws, const u8 *data_in,
@@ -160,8 +160,8 @@ struct list_head *zlib_alloc_workspace(unsigned int level);
void zlib_free_workspace(struct list_head *ws);
struct list_head *zlib_get_workspace(unsigned int level);
-int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
+int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
@@ -170,8 +170,8 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct list_head *lzo_alloc_workspace(unsigned int level);
void lzo_free_workspace(struct list_head *ws);
-int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
+int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct folio **folios, unsigned long *out_folios,
unsigned long *total_in, unsigned long *total_out);
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int zstd_decompress(struct list_head *ws, const u8 *data_in,
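After the conversion, every compressor produces an array of order-0 folios drawn from the shared compression pool. A kernel-context sketch of how a caller might drive the folio entry point and return the output to the pool; this is assumption-laden (error handling trimmed, and the in/out parameter conventions follow the btrfs_compress_folios() comment above):

static int compress_one_extent(struct address_space *mapping, u64 start,
			       unsigned int type_level)
{
	struct folio *folios[BTRFS_MAX_COMPRESSED >> PAGE_SHIFT];
	unsigned long nr_folios = 0;
	unsigned long total_in = 0;				/* returns bytes consumed */
	unsigned long total_out = BTRFS_MAX_UNCOMPRESSED;	/* in: input len, out: compressed len */
	int ret;

	ret = btrfs_compress_folios(type_level, mapping, start, folios,
				    &nr_folios, &total_in, &total_out);

	for (unsigned long i = 0; i < nr_folios; i++)
		btrfs_free_compr_folio(folios[i]);	/* back to the pool */
	return ret;
}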
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index aaf53fd84358..1a49b9232990 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -291,7 +291,7 @@ static void add_root_to_dirty_list(struct btrfs_root *root)
spin_lock(&fs_info->trans_lock);
if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
/* Want the extent tree to be the last on the list */
- if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID)
list_move_tail(&root->dirty_list,
&fs_info->dirty_cowonly_roots);
else
@@ -454,7 +454,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
}
} else {
refs = 1;
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
else
@@ -466,15 +466,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
if (refs > 1) {
- if ((owner == root->root_key.objectid ||
- root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
+ if ((owner == btrfs_root_id(root) ||
+ btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) &&
!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
ret = btrfs_inc_ref(trans, root, buf, 1);
if (ret)
return ret;
- if (root->root_key.objectid ==
- BTRFS_TREE_RELOC_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
ret = btrfs_dec_ref(trans, root, buf, 0);
if (ret)
return ret;
@@ -485,8 +484,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
} else {
- if (root->root_key.objectid ==
- BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
ret = btrfs_inc_ref(trans, root, cow, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0);
@@ -500,8 +498,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
}
} else {
if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
- if (root->root_key.objectid ==
- BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
ret = btrfs_inc_ref(trans, root, cow, 1);
else
ret = btrfs_inc_ref(trans, root, cow, 0);
@@ -563,13 +560,13 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
if (parent)
parent_start = parent->start;
reloc_src_root = btrfs_header_owner(buf);
}
cow = btrfs_alloc_tree_block(trans, root, parent_start,
- root->root_key.objectid, &disk_key, level,
+ btrfs_root_id(root), &disk_key, level,
search_start, empty_size, reloc_src_root, nest);
if (IS_ERR(cow))
return PTR_ERR(cow);
@@ -582,10 +579,10 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
BTRFS_HEADER_FLAG_RELOC);
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
else
- btrfs_set_header_owner(cow, root->root_key.objectid);
+ btrfs_set_header_owner(cow, btrfs_root_id(root));
write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
@@ -609,7 +606,7 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
if (buf == root->node) {
WARN_ON(parent && parent != buf);
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
parent_start = buf->start;
@@ -685,7 +682,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
*/
if (btrfs_header_generation(buf) == trans->transid &&
!btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
- !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
+ !(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
!test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
return 0;
@@ -1003,7 +1000,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto out;
}
- __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
+ btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
wret = btrfs_cow_block(trans, root, left,
parent, pslot - 1, &left,
BTRFS_NESTING_LEFT_COW);
@@ -1021,7 +1018,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto out;
}
- __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
+ btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
wret = btrfs_cow_block(trans, root, right,
parent, pslot + 1, &right,
BTRFS_NESTING_RIGHT_COW);
@@ -1205,7 +1202,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (IS_ERR(left))
return PTR_ERR(left);
- __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
+ btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
left_nr = btrfs_header_nritems(left);
if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -1265,7 +1262,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (IS_ERR(right))
return PTR_ERR(right);
- __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
+ btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
right_nr = btrfs_header_nritems(right);
if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -1511,7 +1508,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
check.has_first_key = true;
check.level = parent_level - 1;
check.transid = gen;
- check.owner_root = root->root_key.objectid;
+ check.owner_root = btrfs_root_id(root);
/*
* If we need to read an extent buffer from disk and we are holding locks
@@ -1556,7 +1553,7 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
btrfs_release_path(p);
return -EIO;
}
- if (btrfs_check_eb_owner(tmp, root->root_key.objectid)) {
+ if (btrfs_check_eb_owner(tmp, btrfs_root_id(root))) {
free_extent_buffer(tmp);
btrfs_release_path(p);
return -EUCLEAN;
@@ -2865,7 +2862,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(lower, &lower_key, 0);
- c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ c = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
&lower_key, level, root->node->start, 0,
0, BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(c))
@@ -3009,7 +3006,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
mid = (c_nritems + 1) / 2;
btrfs_node_key(c, &disk_key, mid);
- split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ split = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
&disk_key, level, c->start, 0,
0, BTRFS_NESTING_SPLIT);
if (IS_ERR(split))
@@ -3267,7 +3264,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
if (IS_ERR(right))
return PTR_ERR(right);
- __btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
+ btrfs_tree_lock_nested(right, BTRFS_NESTING_RIGHT);
free_space = btrfs_leaf_free_space(right);
if (free_space < data_size)
@@ -3483,7 +3480,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
if (IS_ERR(left))
return PTR_ERR(left);
- __btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
+ btrfs_tree_lock_nested(left, BTRFS_NESTING_LEFT);
free_space = btrfs_leaf_free_space(left);
if (free_space < data_size) {
@@ -3761,7 +3758,7 @@ again:
* BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
* use BTRFS_NESTING_NEW_ROOT.
*/
- right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
+ right = btrfs_alloc_tree_block(trans, root, 0, btrfs_root_id(root),
&disk_key, 0, l->start, 0, 0,
num_doubles ? BTRFS_NESTING_NEW_ROOT :
BTRFS_NESTING_SPLIT);
diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c
index f015fa1b6301..407ccec3e57e 100644
--- a/fs/btrfs/defrag.c
+++ b/fs/btrfs/defrag.c
@@ -147,7 +147,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
defrag->ino = btrfs_ino(inode);
defrag->transid = transid;
- defrag->root = root->root_key.objectid;
+ defrag->root = btrfs_root_id(root);
defrag->extent_thresh = extent_thresh;
spin_lock(&fs_info->defrag_inodes_lock);
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index dd6f566a383f..95a0497fa866 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1133,6 +1133,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
if (ret)
return ret;
+ ret = btrfs_record_root_in_trans(trans, node->root);
+ if (ret)
+ return ret;
ret = btrfs_update_delayed_inode(trans, node->root, path, node);
return ret;
}
@@ -1648,7 +1651,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
if (unlikely(ret)) {
btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
- index, node->root->root_key.objectid,
+ index, btrfs_root_id(node->root),
node->inode_id, ret);
btrfs_delayed_item_release_metadata(dir->root, item);
btrfs_release_delayed_item(item);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index e44e62cf76bc..6cc80fb10da2 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -16,8 +16,7 @@
#include "fs.h"
struct kmem_cache *btrfs_delayed_ref_head_cachep;
-struct kmem_cache *btrfs_delayed_tree_ref_cachep;
-struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_ref_node_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
* delayed back reference update tracking. For subvolume trees
@@ -305,50 +304,19 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
}
/*
- * compare two delayed tree backrefs with same bytenr and type
- */
-static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
- struct btrfs_delayed_tree_ref *ref2)
-{
- if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
- } else {
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
- }
- return 0;
-}
-
-/*
* compare two delayed data backrefs with same bytenr and type
*/
-static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
- struct btrfs_delayed_data_ref *ref2)
+static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
+ struct btrfs_delayed_ref_node *ref2)
{
- if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
- if (ref1->root < ref2->root)
- return -1;
- if (ref1->root > ref2->root)
- return 1;
- if (ref1->objectid < ref2->objectid)
- return -1;
- if (ref1->objectid > ref2->objectid)
- return 1;
- if (ref1->offset < ref2->offset)
- return -1;
- if (ref1->offset > ref2->offset)
- return 1;
- } else {
- if (ref1->parent < ref2->parent)
- return -1;
- if (ref1->parent > ref2->parent)
- return 1;
- }
+ if (ref1->data_ref.objectid < ref2->data_ref.objectid)
+ return -1;
+ if (ref1->data_ref.objectid > ref2->data_ref.objectid)
+ return 1;
+ if (ref1->data_ref.offset < ref2->data_ref.offset)
+ return -1;
+ if (ref1->data_ref.offset > ref2->data_ref.offset)
+ return 1;
return 0;
}
@@ -362,13 +330,20 @@ static int comp_refs(struct btrfs_delayed_ref_node *ref1,
return -1;
if (ref1->type > ref2->type)
return 1;
- if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
- ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
- ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
- btrfs_delayed_node_to_tree_ref(ref2));
- else
- ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
- btrfs_delayed_node_to_data_ref(ref2));
+ if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
+ ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
+ if (ref1->parent < ref2->parent)
+ return -1;
+ if (ref1->parent > ref2->parent)
+ return 1;
+ } else {
+ if (ref1->ref_root < ref2->ref_root)
+ return -1;
+ if (ref1->ref_root > ref2->ref_root)
+ return 1;
+ if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
+ ret = comp_data_refs(ref1, ref2);
+ }
if (ret)
return ret;
if (check_seq) {
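The unified comparator now keys shared refs (BTRFS_SHARED_*_REF_KEY) by parent bytenr and keyed refs by owning root, with data refs further ordered by (objectid, offset); keeping this a strict total order is what keeps each head's rbtree of ref nodes well formed. A userspace model of the key comparison (a hypothetical simplification that omits the type comparison done above):

#include <stdio.h>

struct ref_key {
	int shared;	/* BTRFS_SHARED_*_REF_KEY? */
	unsigned long long parent, ref_root, objectid, offset;
};

static int cmp_u64(unsigned long long a, unsigned long long b)
{
	return (a > b) - (a < b);
}

static int comp_ref_keys(const struct ref_key *a, const struct ref_key *b)
{
	int ret;

	if (a->shared)
		return cmp_u64(a->parent, b->parent);
	ret = cmp_u64(a->ref_root, b->ref_root);
	if (!ret)
		ret = cmp_u64(a->objectid, b->objectid);
	if (!ret)
		ret = cmp_u64(a->offset, b->offset);
	return ret;
}

int main(void)
{
	struct ref_key a = { 0, 0, 5, 257, 0 }, b = { 0, 0, 5, 257, 4096 };

	printf("%d\n", comp_ref_keys(&a, &b));	/* -1: same root and inode, lower offset */
	return 0;
}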
@@ -828,18 +803,20 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
}
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_ref *generic_ref,
struct btrfs_qgroup_extent_record *qrecord,
- u64 bytenr, u64 num_bytes, u64 ref_root,
- u64 reserved, int action, bool is_data,
- bool is_system, u64 owning_root)
+ u64 reserved)
{
int count_mod = 1;
bool must_insert_reserved = false;
/* If reserved is provided, it must be a data extent. */
- BUG_ON(!is_data && reserved);
+ BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);
- switch (action) {
+ switch (generic_ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ /* count_mod is already set to 1. */
+ break;
case BTRFS_UPDATE_DELAYED_HEAD:
count_mod = 0;
break;
@@ -868,14 +845,14 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
}
refcount_set(&head_ref->refs, 1);
- head_ref->bytenr = bytenr;
- head_ref->num_bytes = num_bytes;
+ head_ref->bytenr = generic_ref->bytenr;
+ head_ref->num_bytes = generic_ref->num_bytes;
head_ref->ref_mod = count_mod;
head_ref->reserved_bytes = reserved;
head_ref->must_insert_reserved = must_insert_reserved;
- head_ref->owning_root = owning_root;
- head_ref->is_data = is_data;
- head_ref->is_system = is_system;
+ head_ref->owning_root = generic_ref->owning_root;
+ head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
+ head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
head_ref->ref_tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&head_ref->ref_add_list);
RB_CLEAR_NODE(&head_ref->href_node);
@@ -885,12 +862,12 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
mutex_init(&head_ref->mutex);
if (qrecord) {
- if (ref_root && reserved) {
+ if (generic_ref->ref_root && reserved) {
qrecord->data_rsv = reserved;
- qrecord->data_rsv_refroot = ref_root;
+ qrecord->data_rsv_refroot = generic_ref->ref_root;
}
- qrecord->bytenr = bytenr;
- qrecord->num_bytes = num_bytes;
+ qrecord->bytenr = generic_ref->bytenr;
+ qrecord->num_bytes = generic_ref->num_bytes;
qrecord->old_roots = NULL;
}
}
@@ -982,49 +959,45 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
*/
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_node *ref,
- u64 bytenr, u64 num_bytes, u64 ref_root,
- int action, u8 ref_type)
+ struct btrfs_ref *generic_ref)
{
+ int action = generic_ref->action;
u64 seq = 0;
if (action == BTRFS_ADD_DELAYED_EXTENT)
action = BTRFS_ADD_DELAYED_REF;
- if (is_fstree(ref_root))
+ if (is_fstree(generic_ref->ref_root))
seq = atomic64_read(&fs_info->tree_mod_seq);
refcount_set(&ref->refs, 1);
- ref->bytenr = bytenr;
- ref->num_bytes = num_bytes;
+ ref->bytenr = generic_ref->bytenr;
+ ref->num_bytes = generic_ref->num_bytes;
ref->ref_mod = 1;
ref->action = action;
ref->seq = seq;
- ref->type = ref_type;
+ ref->type = btrfs_ref_type(generic_ref);
+ ref->ref_root = generic_ref->ref_root;
+ ref->parent = generic_ref->parent;
RB_CLEAR_NODE(&ref->ref_node);
INIT_LIST_HEAD(&ref->add_list);
-}
-void btrfs_init_generic_ref(struct btrfs_ref *generic_ref, int action, u64 bytenr,
- u64 len, u64 parent, u64 owning_root)
-{
- generic_ref->action = action;
- generic_ref->bytenr = bytenr;
- generic_ref->len = len;
- generic_ref->parent = parent;
- generic_ref->owning_root = owning_root;
+ if (generic_ref->type == BTRFS_REF_DATA)
+ ref->data_ref = generic_ref->data_ref;
+ else
+ ref->tree_ref = generic_ref->tree_ref;
}
-void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 root,
- u64 mod_root, bool skip_qgroup)
+void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
+ bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
/* If @real_root not set, use @root as fallback */
- generic_ref->real_root = mod_root ?: root;
+ generic_ref->real_root = mod_root ?: generic_ref->ref_root;
#endif
generic_ref->tree_ref.level = level;
- generic_ref->tree_ref.ref_root = root;
generic_ref->type = BTRFS_REF_METADATA;
- if (skip_qgroup || !(is_fstree(root) &&
+ if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
(!mod_root || is_fstree(mod_root))))
generic_ref->skip_qgroup = true;
else
@@ -1032,85 +1005,58 @@ void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 root,
}
-void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ref_root, u64 ino,
- u64 offset, u64 mod_root, bool skip_qgroup)
+void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
+ u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
/* If @real_root not set, use @root as fallback */
- generic_ref->real_root = mod_root ?: ref_root;
+ generic_ref->real_root = mod_root ?: generic_ref->ref_root;
#endif
- generic_ref->data_ref.ref_root = ref_root;
- generic_ref->data_ref.ino = ino;
+ generic_ref->data_ref.objectid = ino;
generic_ref->data_ref.offset = offset;
generic_ref->type = BTRFS_REF_DATA;
- if (skip_qgroup || !(is_fstree(ref_root) &&
+ if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
(!mod_root || is_fstree(mod_root))))
generic_ref->skip_qgroup = true;
else
generic_ref->skip_qgroup = false;
}
-/*
- * add a delayed tree ref. This does all of the accounting required
- * to make sure the delayed ref is eventually processed before this
- * transaction commits.
- */
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
- struct btrfs_ref *generic_ref,
- struct btrfs_delayed_extent_op *extent_op)
+static int add_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref,
+ struct btrfs_delayed_extent_op *extent_op,
+ u64 reserved)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_delayed_tree_ref *ref;
+ struct btrfs_delayed_ref_node *node;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
bool qrecord_inserted;
- bool is_system;
- bool merged;
int action = generic_ref->action;
- int level = generic_ref->tree_ref.level;
- u64 bytenr = generic_ref->bytenr;
- u64 num_bytes = generic_ref->len;
- u64 parent = generic_ref->parent;
- u8 ref_type;
-
- is_system = (generic_ref->tree_ref.ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ bool merged;
- ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
- ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
- if (!ref)
+ node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
+ if (!node)
return -ENOMEM;
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
return -ENOMEM;
}
if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) {
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
return -ENOMEM;
}
}
- if (parent)
- ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
- else
- ref_type = BTRFS_TREE_BLOCK_REF_KEY;
-
- init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
- generic_ref->tree_ref.ref_root, action,
- ref_type);
- ref->root = generic_ref->tree_ref.ref_root;
- ref->parent = parent;
- ref->level = level;
-
- init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
- generic_ref->tree_ref.ref_root, 0, action,
- false, is_system, generic_ref->owning_root);
+ init_delayed_ref_common(fs_info, node, generic_ref);
+ init_delayed_ref_head(head_ref, generic_ref, record, reserved);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
@@ -1123,7 +1069,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
head_ref = add_delayed_ref_head(trans, head_ref, record,
action, &qrecord_inserted);
- merged = insert_delayed_ref(trans, head_ref, &ref->node);
+ merged = insert_delayed_ref(trans, head_ref, node);
spin_unlock(&delayed_refs->lock);
/*
@@ -1132,107 +1078,39 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
*/
btrfs_update_delayed_refs_rsv(trans);
- trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
- action == BTRFS_ADD_DELAYED_EXTENT ?
- BTRFS_ADD_DELAYED_REF : action);
+ if (generic_ref->type == BTRFS_REF_DATA)
+ trace_add_delayed_data_ref(trans->fs_info, node);
+ else
+ trace_add_delayed_tree_ref(trans->fs_info, node);
if (merged)
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
if (qrecord_inserted)
- btrfs_qgroup_trace_extent_post(trans, record);
-
+ return btrfs_qgroup_trace_extent_post(trans, record);
return 0;
}
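
One behavioral change is folded into the hunk above: the result of the qgroup trace is no longer discarded. Condensed from the diff:

	/* Before: a failure from the qgroup trace was silently dropped. */
	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);
	return 0;

	/* After: the failure propagates to the caller of add_delayed_ref(). */
	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
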
/*
+ * Add a delayed tree ref. This does all of the accounting required to make sure
+ * the delayed ref is eventually processed before this transaction commits.
+ */
+int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_ref *generic_ref,
+ struct btrfs_delayed_extent_op *extent_op)
+{
+ ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
+ return add_delayed_ref(trans, generic_ref, extent_op, 0);
+}
+
+/*
 * Add a delayed data ref. This is similar to btrfs_add_delayed_tree_ref.
*/
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
u64 reserved)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_delayed_data_ref *ref;
- struct btrfs_delayed_ref_head *head_ref;
- struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_qgroup_extent_record *record = NULL;
- bool qrecord_inserted;
- int action = generic_ref->action;
- bool merged;
- u64 bytenr = generic_ref->bytenr;
- u64 num_bytes = generic_ref->len;
- u64 parent = generic_ref->parent;
- u64 ref_root = generic_ref->data_ref.ref_root;
- u64 owner = generic_ref->data_ref.ino;
- u64 offset = generic_ref->data_ref.offset;
- u8 ref_type;
-
- ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
- ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
- if (!ref)
- return -ENOMEM;
-
- if (parent)
- ref_type = BTRFS_SHARED_DATA_REF_KEY;
- else
- ref_type = BTRFS_EXTENT_DATA_REF_KEY;
- init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
- ref_root, action, ref_type);
- ref->root = ref_root;
- ref->parent = parent;
- ref->objectid = owner;
- ref->offset = offset;
-
-
- head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
- if (!head_ref) {
- kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
- return -ENOMEM;
- }
-
- if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
- record = kzalloc(sizeof(*record), GFP_NOFS);
- if (!record) {
- kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
- kmem_cache_free(btrfs_delayed_ref_head_cachep,
- head_ref);
- return -ENOMEM;
- }
- }
-
- init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
- reserved, action, true, false, generic_ref->owning_root);
- head_ref->extent_op = NULL;
-
- delayed_refs = &trans->transaction->delayed_refs;
- spin_lock(&delayed_refs->lock);
-
- /*
- * insert both the head node and the new ref without dropping
- * the spin lock
- */
- head_ref = add_delayed_ref_head(trans, head_ref, record,
- action, &qrecord_inserted);
-
- merged = insert_delayed_ref(trans, head_ref, &ref->node);
- spin_unlock(&delayed_refs->lock);
-
- /*
- * Need to update the delayed_refs_rsv with any changes we may have
- * made.
- */
- btrfs_update_delayed_refs_rsv(trans);
-
- trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
- action == BTRFS_ADD_DELAYED_EXTENT ?
- BTRFS_ADD_DELAYED_REF : action);
- if (merged)
- kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
-
-
- if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(trans, record);
- return 0;
+ ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
+ return add_delayed_ref(trans, generic_ref, NULL, reserved);
}
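
Both public entry points are now thin shims over add_delayed_ref(); the only asymmetry left is which extra argument each may carry. A condensed usage sketch (ref, extent_op and ram_bytes assumed to be set up as at the call sites later in this patch):

	/* Metadata refs may carry an extent_op, never a byte reservation. */
	ret = btrfs_add_delayed_tree_ref(trans, &ref, extent_op);

	/* Data refs may carry reserved bytes, never an extent_op. */
	ret = btrfs_add_delayed_data_ref(trans, &ref, ram_bytes);
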
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
@@ -1241,13 +1119,18 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
{
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_ref generic_ref = {
+ .type = BTRFS_REF_METADATA,
+ .action = BTRFS_UPDATE_DELAYED_HEAD,
+ .bytenr = bytenr,
+ .num_bytes = num_bytes,
+ };
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref)
return -ENOMEM;
- init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
- BTRFS_UPDATE_DELAYED_HEAD, false, false, 0);
+ init_delayed_ref_head(head_ref, &generic_ref, NULL, 0);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
@@ -1270,18 +1153,7 @@ void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
if (refcount_dec_and_test(&ref->refs)) {
WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
- switch (ref->type) {
- case BTRFS_TREE_BLOCK_REF_KEY:
- case BTRFS_SHARED_BLOCK_REF_KEY:
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
- break;
- case BTRFS_EXTENT_DATA_REF_KEY:
- case BTRFS_SHARED_DATA_REF_KEY:
- kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
- break;
- default:
- BUG();
- }
+ kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
}
}
@@ -1300,8 +1172,7 @@ btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 byt
void __cold btrfs_delayed_ref_exit(void)
{
kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
- kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
- kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+ kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
@@ -1311,12 +1182,8 @@ int __init btrfs_delayed_ref_init(void)
if (!btrfs_delayed_ref_head_cachep)
goto fail;
- btrfs_delayed_tree_ref_cachep = KMEM_CACHE(btrfs_delayed_tree_ref, 0);
- if (!btrfs_delayed_tree_ref_cachep)
- goto fail;
-
- btrfs_delayed_data_ref_cachep = KMEM_CACHE(btrfs_delayed_data_ref, 0);
- if (!btrfs_delayed_data_ref_cachep)
+ btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
+ if (!btrfs_delayed_ref_node_cachep)
goto fail;
btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index b291147cb8ab..04b180ebe1fe 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -30,6 +30,32 @@ enum btrfs_delayed_ref_action {
BTRFS_UPDATE_DELAYED_HEAD,
} __packed;
+struct btrfs_data_ref {
+ /* For EXTENT_DATA_REF */
+
+ /* Inode which refers to this data extent */
+ u64 objectid;
+
+ /*
+ * file_offset - extent_offset
+ *
+ * file_offset is the key.offset of the EXTENT_DATA key.
+ * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
+ */
+ u64 offset;
+};
+
+struct btrfs_tree_ref {
+ /*
+ * Level of this tree block.
+ *
+ * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
+ */
+ int level;
+
+ /* For non-skinny metadata, no special member needed */
+};
+
struct btrfs_delayed_ref_node {
struct rb_node ref_node;
/*
@@ -48,6 +74,15 @@ struct btrfs_delayed_ref_node {
/* seq number to keep track of insertion order */
u64 seq;
+ /* The ref_root for this ref */
+ u64 ref_root;
+
+ /*
+	 * The parent for this ref; if this isn't set, the ref_root is the
+ * reference owner.
+ */
+ u64 parent;
+
/* ref count on this data structure */
refcount_t refs;
@@ -64,6 +99,11 @@ struct btrfs_delayed_ref_node {
unsigned int action:8;
unsigned int type:8;
+
+ union {
+ struct btrfs_tree_ref tree_ref;
+ struct btrfs_data_ref data_ref;
+ };
};
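
With the common fields on the node and the type-specific remainder in a union, the container_of() casts behind btrfs_delayed_node_to_tree_ref()/btrfs_delayed_node_to_data_ref() become unnecessary. A before/after fragment, condensed from the call sites updated later in this series:

	/* Before: reach the root through the container struct. */
	data_ref = btrfs_delayed_node_to_data_ref(node);
	root = data_ref->root;

	/*
	 * After: common fields live directly on the node; only the union
	 * members (tree_ref/data_ref) still depend on node->type.
	 */
	root = node->ref_root;
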
struct btrfs_delayed_extent_op {
@@ -151,21 +191,6 @@ struct btrfs_delayed_ref_head {
bool processing;
};
-struct btrfs_delayed_tree_ref {
- struct btrfs_delayed_ref_node node;
- u64 root;
- u64 parent;
- int level;
-};
-
-struct btrfs_delayed_data_ref {
- struct btrfs_delayed_ref_node node;
- u64 root;
- u64 parent;
- u64 objectid;
- u64 offset;
-};
-
enum btrfs_delayed_ref_flags {
/* Indicate that we are flushing delayed refs for the commit */
BTRFS_DELAYED_REFS_FLUSHING,
@@ -214,42 +239,6 @@ enum btrfs_ref_type {
BTRFS_REF_LAST,
} __packed;
-struct btrfs_data_ref {
- /* For EXTENT_DATA_REF */
-
- /* Root which owns this data reference. */
- u64 ref_root;
-
- /* Inode which refers to this data extent */
- u64 ino;
-
- /*
- * file_offset - extent_offset
- *
- * file_offset is the key.offset of the EXTENT_DATA key.
- * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
- */
- u64 offset;
-};
-
-struct btrfs_tree_ref {
- /*
- * Level of this tree block
- *
- * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
- */
- int level;
-
- /*
- * Root which owns this tree block reference.
- *
- * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
- */
- u64 ref_root;
-
- /* For non-skinny metadata, no special member needed */
-};
-
struct btrfs_ref {
enum btrfs_ref_type type;
enum btrfs_delayed_ref_action action;
@@ -267,9 +256,15 @@ struct btrfs_ref {
u64 real_root;
#endif
u64 bytenr;
- u64 len;
+ u64 num_bytes;
u64 owning_root;
+ /*
+	 * The root that owns this reference; either this or ->parent will be
+	 * set, depending on the type of reference.
+ */
+ u64 ref_root;
+
/* Bytenr of the parent tree block */
u64 parent;
union {
@@ -279,8 +274,7 @@ struct btrfs_ref {
};
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
-extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
-extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_ref_node_cachep;
extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
int __init btrfs_delayed_ref_init(void);
@@ -318,12 +312,10 @@ static inline u64 btrfs_calc_delayed_ref_csum_bytes(const struct btrfs_fs_info *
return btrfs_calc_metadata_size(fs_info, num_csum_items);
}
-void btrfs_init_generic_ref(struct btrfs_ref *generic_ref, int action, u64 bytenr,
- u64 len, u64 parent, u64 owning_root);
-void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 root,
+void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
+ bool skip_qgroup);
+void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
u64 mod_root, bool skip_qgroup);
-void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ref_root, u64 ino,
- u64 offset, u64 mod_root, bool skip_qgroup);
static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
@@ -398,19 +390,39 @@ void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
-/*
- * helper functions to cast a node into its container
- */
-static inline struct btrfs_delayed_tree_ref *
-btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
+static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
+{
+ if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
+ node->type == BTRFS_SHARED_DATA_REF_KEY)
+ return node->data_ref.objectid;
+ return node->tree_ref.level;
+}
+
+static inline u64 btrfs_delayed_ref_offset(struct btrfs_delayed_ref_node *node)
{
- return container_of(node, struct btrfs_delayed_tree_ref, node);
+ if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
+ node->type == BTRFS_SHARED_DATA_REF_KEY)
+ return node->data_ref.offset;
+ return 0;
}
-static inline struct btrfs_delayed_data_ref *
-btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
+static inline u8 btrfs_ref_type(struct btrfs_ref *ref)
{
- return container_of(node, struct btrfs_delayed_data_ref, node);
+ ASSERT(ref->type == BTRFS_REF_DATA || ref->type == BTRFS_REF_METADATA);
+
+ if (ref->type == BTRFS_REF_DATA) {
+ if (ref->parent)
+ return BTRFS_SHARED_DATA_REF_KEY;
+ else
+ return BTRFS_EXTENT_DATA_REF_KEY;
+ } else {
+ if (ref->parent)
+ return BTRFS_SHARED_BLOCK_REF_KEY;
+ else
+ return BTRFS_TREE_BLOCK_REF_KEY;
+ }
+
+ return 0;
}
#endif
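
To see how the new accessors cooperate, here is a hypothetical helper (demo_backref_key() is not part of the patch) that builds an on-disk backref key the way insert_tree_block_ref() and insert_extent_data_ref() do; hash_extent_data_ref() is the hash helper those functions use in extent-tree.c:

	static void demo_backref_key(struct btrfs_ref *ref, struct btrfs_key *key)
	{
		key->objectid = ref->bytenr;
		key->type = btrfs_ref_type(ref);

		if (ref->parent)	/* shared refs key on the parent block */
			key->offset = ref->parent;
		else if (ref->type == BTRFS_REF_METADATA)
			key->offset = ref->ref_root;	/* keyed tree refs key on the root id */
		else
			key->offset = hash_extent_data_ref(ref->ref_root,
							   ref->data_ref.objectid,
							   ref->data_ref.offset);
	}
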
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3df5477d48a8..a91a8056758a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -646,7 +646,7 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
u64 objectid)
{
- bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+ bool dummy = btrfs_is_testing(fs_info);
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
@@ -663,8 +663,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->nr_delalloc_inodes = 0;
root->nr_ordered_extents = 0;
root->inode_tree = RB_ROOT;
- /* GFP flags are compatible with XA_FLAGS_*. */
- xa_init_flags(&root->delayed_nodes, GFP_ATOMIC);
+ xa_init(&root->delayed_nodes);
btrfs_init_root_block_rsv(root);
@@ -776,7 +775,7 @@ int btrfs_global_root_insert(struct btrfs_root *root)
if (tmp) {
ret = -EEXIST;
btrfs_warn(fs_info, "global root %llu %llu already exists",
- root->root_key.objectid, root->root_key.offset);
+ btrfs_root_id(root), root->root_key.offset);
}
return ret;
}
@@ -1012,7 +1011,7 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
}
log_root->last_trans = trans->transid;
- log_root->root_key.offset = root->root_key.objectid;
+ log_root->root_key.offset = btrfs_root_id(root);
inode_item = &log_root->root_item.inode;
btrfs_set_stack_inode_generation(inode_item, 1);
@@ -1076,15 +1075,15 @@ static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
* For real fs, and not log/reloc trees, root owner must
* match its root node owner
*/
- if (!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state) &&
- root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
- root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
- root->root_key.objectid != btrfs_header_owner(root->node)) {
+ if (!btrfs_is_testing(fs_info) &&
+ btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
+ btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID &&
+ btrfs_root_id(root) != btrfs_header_owner(root->node)) {
btrfs_crit(fs_info,
"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
- root->root_key.objectid, root->node->start,
+ btrfs_root_id(root), root->node->start,
btrfs_header_owner(root->node),
- root->root_key.objectid);
+ btrfs_root_id(root));
ret = -EUCLEAN;
goto fail;
}
@@ -1121,9 +1120,9 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
btrfs_drew_lock_init(&root->snapshot_lock);
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
+ if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID &&
!btrfs_is_data_reloc_root(root) &&
- is_fstree(root->root_key.objectid)) {
+ is_fstree(btrfs_root_id(root))) {
set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
btrfs_check_and_init_root_item(&root->root_item);
}
@@ -1132,7 +1131,7 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
* Don't assign anonymous block device to roots that are not exposed to
* userspace, the id pool is limited to 1M
*/
- if (is_fstree(root->root_key.objectid) &&
+ if (is_fstree(btrfs_root_id(root)) &&
btrfs_root_refs(&root->root_item) > 0) {
if (!anon_dev) {
ret = get_anon_bdev(&root->anon_dev);
@@ -1219,7 +1218,7 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
spin_lock(&fs_info->fs_roots_radix_lock);
ret = radix_tree_insert(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
root);
if (ret == 0) {
btrfs_grab_root(root);
@@ -1266,9 +1265,14 @@ static void free_global_roots(struct btrfs_fs_info *fs_info)
void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
+ struct percpu_counter *em_counter = &fs_info->evictable_extent_maps;
+
percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
percpu_counter_destroy(&fs_info->delalloc_bytes);
percpu_counter_destroy(&fs_info->ordered_bytes);
+ if (percpu_counter_initialized(em_counter))
+ ASSERT(percpu_counter_sum_positive(em_counter) == 0);
+ percpu_counter_destroy(em_counter);
percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
btrfs_free_csum_hash(fs_info);
btrfs_free_stripe_hash_table(fs_info);
@@ -2584,7 +2588,7 @@ static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int lev
struct btrfs_tree_parent_check check = {
.level = level,
.transid = gen,
- .owner_root = root->root_key.objectid
+ .owner_root = btrfs_root_id(root)
};
int ret = 0;
@@ -2848,6 +2852,10 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
if (ret)
return ret;
+ ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL);
+ if (ret)
+ return ret;
+
ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
if (ret)
return ret;
@@ -2930,7 +2938,7 @@ static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
spin_unlock(&fs_info->fs_roots_radix_lock);
break;
}
- root_objectid = gang[ret - 1]->root_key.objectid + 1;
+ root_objectid = btrfs_root_id(gang[ret - 1]) + 1;
for (i = 0; i < ret; i++) {
/* Avoid to grab roots in dead_roots. */
@@ -2946,7 +2954,7 @@ static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
for (i = 0; i < ret; i++) {
if (!gang[i])
continue;
- root_objectid = gang[i]->root_key.objectid;
+ root_objectid = btrfs_root_id(gang[i]);
err = btrfs_orphan_cleanup(gang[i]);
if (err)
goto out;
@@ -3618,28 +3626,25 @@ ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
static void btrfs_end_super_write(struct bio *bio)
{
struct btrfs_device *device = bio->bi_private;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
- struct page *page;
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- page = bvec->bv_page;
+ struct folio_iter fi;
+ bio_for_each_folio_all(fi, bio) {
if (bio->bi_status) {
btrfs_warn_rl_in_rcu(device->fs_info,
- "lost page write due to IO error on %s (%d)",
+ "lost super block write due to IO error on %s (%d)",
btrfs_dev_name(device),
blk_status_to_errno(bio->bi_status));
- ClearPageUptodate(page);
- SetPageError(page);
btrfs_dev_stat_inc_and_print(device,
BTRFS_DEV_STAT_WRITE_ERRS);
- } else {
- SetPageUptodate(page);
+ /* Ensure failure if the primary sb fails. */
+ if (bio->bi_opf & REQ_FUA)
+ atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR,
+ &device->sb_write_errors);
+ else
+ atomic_inc(&device->sb_write_errors);
}
-
- put_page(page);
- unlock_page(page);
+ folio_unlock(fi.folio);
+ folio_put(fi.folio);
}
bio_put(bio);
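
The FUA branch above packs two facts into one atomic: only the primary super block is written with REQ_FUA, so its failure bumps the counter by BTRFS_SUPER_PRIMARY_WRITE_ERROR (assumed to be larger than any possible per-mirror error count) instead of by one. The wait phase can then recover both signals, sketched here as hypothetical predicates:

	static bool demo_any_sb_write_error(const struct btrfs_device *device)
	{
		return atomic_read(&device->sb_write_errors) != 0;
	}

	static bool demo_primary_sb_failed(const struct btrfs_device *device)
	{
		/* The sentinel increment dwarfs ordinary per-mirror counts. */
		return atomic_read(&device->sb_write_errors) >=
		       BTRFS_SUPER_PRIMARY_WRITE_ERROR;
	}
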
@@ -3726,13 +3731,13 @@ struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
/*
* Write superblock @sb to the @device. Do not wait for completion, all the
- * pages we use for writing are locked.
+ * folios we use for writing are locked.
*
* Write @max_mirrors copies of the superblock, where 0 means default that fit
* the expected device size at commit time. Note that max_mirrors must be
* same for write and wait phases.
*
- * Return number of errors when page is not found or submission fails.
+ * Return 0 if at least one super block copy was submitted successfully, -1 if
+ * all copies failed (folio not found or submission error).
*/
static int write_dev_supers(struct btrfs_device *device,
struct btrfs_super_block *sb, int max_mirrors)
@@ -3741,19 +3746,21 @@ static int write_dev_supers(struct btrfs_device *device,
struct address_space *mapping = device->bdev->bd_inode->i_mapping;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
int i;
- int errors = 0;
int ret;
u64 bytenr, bytenr_orig;
+ atomic_set(&device->sb_write_errors, 0);
+
if (max_mirrors == 0)
max_mirrors = BTRFS_SUPER_MIRROR_MAX;
shash->tfm = fs_info->csum_shash;
for (i = 0; i < max_mirrors; i++) {
- struct page *page;
+ struct folio *folio;
struct bio *bio;
struct btrfs_super_block *disk_super;
+ size_t offset;
bytenr_orig = btrfs_sb_offset(i);
ret = btrfs_sb_log_location(device, i, WRITE, &bytenr);
@@ -3763,7 +3770,7 @@ static int write_dev_supers(struct btrfs_device *device,
btrfs_err(device->fs_info,
"couldn't get super block location for mirror %d",
i);
- errors++;
+ atomic_inc(&device->sb_write_errors);
continue;
}
if (bytenr + BTRFS_SUPER_INFO_SIZE >=
@@ -3776,20 +3783,20 @@ static int write_dev_supers(struct btrfs_device *device,
BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
sb->csum);
- page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
- GFP_NOFS);
- if (!page) {
+ folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ GFP_NOFS);
+ if (IS_ERR(folio)) {
btrfs_err(device->fs_info,
"couldn't get super block page for bytenr %llu",
bytenr);
- errors++;
+ atomic_inc(&device->sb_write_errors);
continue;
}
+ ASSERT(folio_order(folio) == 0);
- /* Bump the refcount for wait_dev_supers() */
- get_page(page);
-
- disk_super = page_address(page);
+ offset = offset_in_folio(folio, bytenr);
+ disk_super = folio_address(folio) + offset;
memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
/*
@@ -3803,8 +3810,7 @@ static int write_dev_supers(struct btrfs_device *device,
bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
bio->bi_private = device;
bio->bi_end_io = btrfs_end_super_write;
- __bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
- offset_in_page(bytenr));
+ bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset);
/*
* We FUA only the first super block. The others we allow to
@@ -3816,17 +3822,17 @@ static int write_dev_supers(struct btrfs_device *device,
submit_bio(bio);
if (btrfs_advance_sb_log(device, i))
- errors++;
+ atomic_inc(&device->sb_write_errors);
}
- return errors < i ? 0 : -1;
+ return atomic_read(&device->sb_write_errors) < i ? 0 : -1;
}
/*
* Wait for write completion of superblocks done by write_dev_supers,
* @max_mirrors same for write and wait phases.
*
- * Return number of errors when page is not found or not marked up to
- * date.
+ * Return -1 if the primary super block write failed or if no super block
+ * copies were written; otherwise 0.
*/
static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
{
@@ -3840,7 +3846,7 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
max_mirrors = BTRFS_SUPER_MIRROR_MAX;
for (i = 0; i < max_mirrors; i++) {
- struct page *page;
+ struct folio *folio;
ret = btrfs_sb_log_location(device, i, READ, &bytenr);
if (ret == -ENOENT) {
@@ -3855,30 +3861,21 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
device->commit_total_bytes)
break;
- page = find_get_page(device->bdev->bd_inode->i_mapping,
- bytenr >> PAGE_SHIFT);
- if (!page) {
- errors++;
- if (i == 0)
- primary_failed = true;
+ folio = filemap_get_folio(device->bdev->bd_inode->i_mapping,
+ bytenr >> PAGE_SHIFT);
+ /* If the folio has been removed, then we know it completed. */
+ if (IS_ERR(folio))
continue;
- }
- /* Page is submitted locked and unlocked once the IO completes */
- wait_on_page_locked(page);
- if (PageError(page)) {
- errors++;
- if (i == 0)
- primary_failed = true;
- }
-
- /* Drop our reference */
- put_page(page);
+ ASSERT(folio_order(folio) == 0);
- /* Drop the reference from the writing run */
- put_page(page);
+ /* Folio will be unlocked once the write completes. */
+ folio_wait_locked(folio);
+ folio_put(folio);
}
- /* log error, force error return */
+ errors += atomic_read(&device->sb_write_errors);
+ if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR)
+ primary_failed = true;
if (primary_failed) {
btrfs_err(device->fs_info, "error writing primary super block to device %llu",
device->devid);
@@ -4139,7 +4136,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
spin_lock(&fs_info->fs_roots_radix_lock);
radix_tree_delete(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid);
+ (unsigned long)btrfs_root_id(root));
if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state))
drop_ref = true;
spin_unlock(&fs_info->fs_roots_radix_lock);
@@ -4182,9 +4179,6 @@ static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
struct btrfs_transaction *tmp;
bool found = false;
- if (list_empty(&fs_info->trans_list))
- return;
-
/*
* This function is only called at the very end of close_ctree(),
* thus no other running transaction, no need to take trans_lock.
@@ -4484,7 +4478,7 @@ static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
for (i = 0; i < ret; i++) {
if (!gang[i])
continue;
- root_objectid = gang[i]->root_key.objectid;
+ root_objectid = btrfs_root_id(gang[i]);
btrfs_free_log(NULL, gang[i]);
btrfs_put_root(gang[i]);
}
@@ -4815,7 +4809,7 @@ static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info)
btrfs_qgroup_free_meta_all_pertrans(root);
radix_tree_tag_clear(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
BTRFS_ROOT_TRANS_TAG);
}
}
@@ -4844,14 +4838,10 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
cur_trans->state = TRANS_STATE_UNBLOCKED;
wake_up(&fs_info->transaction_wait);
- btrfs_destroy_delayed_inodes(fs_info);
-
btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
EXTENT_DIRTY);
btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents);
- btrfs_free_all_qgroup_pertrans(fs_info);
-
	cur_trans->state = TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);
}
@@ -4904,6 +4894,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
btrfs_assert_delayed_root_empty(fs_info);
btrfs_destroy_all_delalloc_inodes(fs_info);
btrfs_drop_all_logs(fs_info);
+ btrfs_free_all_qgroup_pertrans(fs_info);
mutex_unlock(&fs_info->transaction_kthread_mutex);
return 0;
@@ -4959,7 +4950,7 @@ int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
btrfs_warn(root->fs_info,
"the objectid of root %llu reaches its highest value",
- root->root_key.objectid);
+ btrfs_root_id(root));
ret = -ENOSPC;
goto out;
}
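
The root->root_key.objectid to btrfs_root_id() conversions that dominate the disk-io.c and export.c hunks are mechanical; the accessor amounts to the trivial inline below (reproduced here only for context):

	static inline u64 btrfs_root_id(const struct btrfs_root *root)
	{
		return root->root_key.objectid;
	}
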
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 8398d345ec5b..9e81f89e76d8 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -34,7 +34,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
type = FILEID_BTRFS_WITHOUT_PARENT;
fid->objectid = btrfs_ino(BTRFS_I(inode));
- fid->root_objectid = BTRFS_I(inode)->root->root_key.objectid;
+ fid->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
fid->gen = inode->i_generation;
if (parent) {
@@ -42,7 +42,7 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
fid->parent_objectid = BTRFS_I(parent)->location.objectid;
fid->parent_gen = parent->i_generation;
- parent_root_id = BTRFS_I(parent)->root->root_key.objectid;
+ parent_root_id = btrfs_root_id(BTRFS_I(parent)->root);
if (parent_root_id != fid->root_objectid) {
fid->parent_root_objectid = parent_root_id;
@@ -160,7 +160,7 @@ struct dentry *btrfs_get_parent(struct dentry *child)
return ERR_PTR(-ENOMEM);
if (btrfs_ino(BTRFS_I(dir)) == BTRFS_FIRST_FREE_OBJECTID) {
- key.objectid = root->root_key.objectid;
+ key.objectid = btrfs_root_id(root);
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = fs_info->tree_root;
@@ -243,7 +243,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
return -ENOMEM;
if (ino == BTRFS_FIRST_FREE_OBJECTID) {
- key.objectid = BTRFS_I(inode)->root->root_key.objectid;
+ key.objectid = btrfs_root_id(BTRFS_I(inode)->root);
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = fs_info->tree_root;
diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
index c09b428823d7..ed2cfc3d5d8a 100644
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c
@@ -1059,7 +1059,7 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state *prealloc = NULL;
struct rb_node **p = NULL;
struct rb_node *parent = NULL;
- int err = 0;
+ int ret = 0;
u64 last_start;
u64 last_end;
u32 exclusive_bits = (bits & EXTENT_LOCKED);
@@ -1122,7 +1122,7 @@ hit_next:
if (state->state & exclusive_bits) {
*failed_start = state->start;
cache_state(state, failed_state);
- err = -EEXIST;
+ ret = -EEXIST;
goto out;
}
@@ -1158,7 +1158,7 @@ hit_next:
if (state->state & exclusive_bits) {
*failed_start = start;
cache_state(state, failed_state);
- err = -EEXIST;
+ ret = -EEXIST;
goto out;
}
@@ -1175,12 +1175,12 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc)
goto search_again;
- err = split_state(tree, state, prealloc, start);
- if (err)
- extent_io_tree_panic(tree, state, "split", err);
+ ret = split_state(tree, state, prealloc, start);
+ if (ret)
+ extent_io_tree_panic(tree, state, "split", ret);
prealloc = NULL;
- if (err)
+ if (ret)
goto out;
if (state->end <= end) {
set_state_bits(tree, state, bits, changeset);
@@ -1224,8 +1224,8 @@ hit_next:
prealloc->end = this_end;
inserted_state = insert_state(tree, prealloc, bits, changeset);
if (IS_ERR(inserted_state)) {
- err = PTR_ERR(inserted_state);
- extent_io_tree_panic(tree, prealloc, "insert", err);
+ ret = PTR_ERR(inserted_state);
+ extent_io_tree_panic(tree, prealloc, "insert", ret);
}
cache_state(inserted_state, cached_state);
@@ -1244,16 +1244,16 @@ hit_next:
if (state->state & exclusive_bits) {
*failed_start = start;
cache_state(state, failed_state);
- err = -EEXIST;
+ ret = -EEXIST;
goto out;
}
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc)
goto search_again;
- err = split_state(tree, state, prealloc, end + 1);
- if (err)
- extent_io_tree_panic(tree, state, "split", err);
+ ret = split_state(tree, state, prealloc, end + 1);
+ if (ret)
+ extent_io_tree_panic(tree, state, "split", ret);
set_state_bits(tree, prealloc, bits, changeset);
cache_state(prealloc, cached_state);
@@ -1275,7 +1275,7 @@ out:
if (prealloc)
free_extent_state(prealloc);
- return err;
+ return ret;
}
@@ -1312,7 +1312,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state *prealloc = NULL;
struct rb_node **p = NULL;
struct rb_node *parent = NULL;
- int err = 0;
+ int ret = 0;
u64 last_start;
u64 last_end;
bool first_iteration = true;
@@ -1351,7 +1351,7 @@ again:
if (!state) {
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
prealloc->start = start;
@@ -1402,14 +1402,14 @@ hit_next:
if (state->start < start) {
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
- err = split_state(tree, state, prealloc, start);
- if (err)
- extent_io_tree_panic(tree, state, "split", err);
+ ret = split_state(tree, state, prealloc, start);
+ if (ret)
+ extent_io_tree_panic(tree, state, "split", ret);
prealloc = NULL;
- if (err)
+ if (ret)
goto out;
if (state->end <= end) {
set_state_bits(tree, state, bits, NULL);
@@ -1442,7 +1442,7 @@ hit_next:
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
@@ -1454,8 +1454,8 @@ hit_next:
prealloc->end = this_end;
inserted_state = insert_state(tree, prealloc, bits, NULL);
if (IS_ERR(inserted_state)) {
- err = PTR_ERR(inserted_state);
- extent_io_tree_panic(tree, prealloc, "insert", err);
+ ret = PTR_ERR(inserted_state);
+ extent_io_tree_panic(tree, prealloc, "insert", ret);
}
cache_state(inserted_state, cached_state);
if (inserted_state == prealloc)
@@ -1472,13 +1472,13 @@ hit_next:
if (state->start <= end && state->end > end) {
prealloc = alloc_extent_state_atomic(prealloc);
if (!prealloc) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
- err = split_state(tree, state, prealloc, end + 1);
- if (err)
- extent_io_tree_panic(tree, state, "split", err);
+ ret = split_state(tree, state, prealloc, end + 1);
+ if (ret)
+ extent_io_tree_panic(tree, state, "split", ret);
set_state_bits(tree, prealloc, bits, NULL);
cache_state(prealloc, cached_state);
@@ -1500,7 +1500,7 @@ out:
if (prealloc)
free_extent_state(prealloc);
- return err;
+ return ret;
}
/*
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index beedd6ed64d3..47d48233b592 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -46,9 +46,7 @@
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *node, u64 parent,
- u64 root_objectid, u64 owner_objectid,
- u64 owner_offset,
+ struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
struct extent_buffer *leaf,
@@ -448,9 +446,8 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_extent_data_ref *ref;
struct extent_buffer *leaf;
u32 nritems;
- int ret;
int recow;
- int err = -ENOENT;
+ int ret;
key.objectid = bytenr;
if (parent) {
@@ -464,26 +461,26 @@ static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
again:
recow = 0;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0) {
- err = ret;
- goto fail;
- }
+ if (ret < 0)
+ return ret;
if (parent) {
- if (!ret)
- return 0;
- goto fail;
+ if (ret)
+ return -ENOENT;
+ return 0;
}
+ ret = -ENOENT;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
while (1) {
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- err = ret;
- if (ret)
- goto fail;
+ if (ret) {
+				if (ret > 0)
+ return -ENOENT;
+ return ret;
+ }
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
@@ -504,37 +501,37 @@ again:
btrfs_release_path(path);
goto again;
}
- err = 0;
+ ret = 0;
break;
}
path->slots[0]++;
}
fail:
- return err;
+ return ret;
}
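
The early-return rewrite above relies on the btrfs_next_leaf() convention: negative errno on error, 0 when it advanced to the next leaf, positive when the tree is exhausted. A lookup therefore translates "tree exhausted" into -ENOENT, condensed from the function above:

	ret = btrfs_next_leaf(root, path);
	if (ret) {
		if (ret > 0)
			return -ENOENT;	/* no more leaves: the ref does not exist */
		return ret;		/* real error */
	}
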
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
- u64 bytenr, u64 parent,
- u64 root_objectid, u64 owner,
- u64 offset, int refs_to_add)
+ struct btrfs_delayed_ref_node *node,
+ u64 bytenr)
{
struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
struct btrfs_key key;
struct extent_buffer *leaf;
+ u64 owner = btrfs_delayed_ref_owner(node);
+ u64 offset = btrfs_delayed_ref_offset(node);
u32 size;
u32 num_refs;
int ret;
key.objectid = bytenr;
- if (parent) {
+ if (node->parent) {
key.type = BTRFS_SHARED_DATA_REF_KEY;
- key.offset = parent;
+ key.offset = node->parent;
size = sizeof(struct btrfs_shared_data_ref);
} else {
key.type = BTRFS_EXTENT_DATA_REF_KEY;
- key.offset = hash_extent_data_ref(root_objectid,
- owner, offset);
+ key.offset = hash_extent_data_ref(node->ref_root, owner, offset);
size = sizeof(struct btrfs_extent_data_ref);
}
@@ -543,15 +540,15 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
goto fail;
leaf = path->nodes[0];
- if (parent) {
+ if (node->parent) {
struct btrfs_shared_data_ref *ref;
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_shared_data_ref);
if (ret == 0) {
- btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
+ btrfs_set_shared_data_ref_count(leaf, ref, node->ref_mod);
} else {
num_refs = btrfs_shared_data_ref_count(leaf, ref);
- num_refs += refs_to_add;
+ num_refs += node->ref_mod;
btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
}
} else {
@@ -559,7 +556,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
while (ret == -EEXIST) {
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
- if (match_extent_data_ref(leaf, ref, root_objectid,
+ if (match_extent_data_ref(leaf, ref, node->ref_root,
owner, offset))
break;
btrfs_release_path(path);
@@ -574,14 +571,13 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
ref = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_extent_data_ref);
if (ret == 0) {
- btrfs_set_extent_data_ref_root(leaf, ref,
- root_objectid);
+ btrfs_set_extent_data_ref_root(leaf, ref, node->ref_root);
btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
btrfs_set_extent_data_ref_offset(leaf, ref, offset);
- btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
+ btrfs_set_extent_data_ref_count(leaf, ref, node->ref_mod);
} else {
num_refs = btrfs_extent_data_ref_count(leaf, ref);
- num_refs += refs_to_add;
+ num_refs += node->ref_mod;
btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
}
}
@@ -705,20 +701,20 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
- u64 bytenr, u64 parent,
- u64 root_objectid)
+ struct btrfs_delayed_ref_node *node,
+ u64 bytenr)
{
struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
struct btrfs_key key;
int ret;
key.objectid = bytenr;
- if (parent) {
+ if (node->parent) {
key.type = BTRFS_SHARED_BLOCK_REF_KEY;
- key.offset = parent;
+ key.offset = node->parent;
} else {
key.type = BTRFS_TREE_BLOCK_REF_KEY;
- key.offset = root_objectid;
+ key.offset = node->ref_root;
}
ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
@@ -1439,7 +1435,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
generic_ref->action);
BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
- generic_ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID);
+ generic_ref->ref_root == BTRFS_TREE_LOG_OBJECTID);
if (generic_ref->type == BTRFS_REF_METADATA)
ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
@@ -1462,34 +1458,12 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
* @node: The delayed ref node used to get the bytenr/length for
* extent whose references are incremented.
*
- * @parent: If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
- * BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
- * bytenr of the parent block. Since new extents are always
- * created with indirect references, this will only be the case
- * when relocating a shared extent. In that case, root_objectid
- * will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
- * be 0
- *
- * @root_objectid: The id of the root where this modification has originated,
- * this can be either one of the well-known metadata trees or
- * the subvolume id which references this extent.
- *
- * @owner: For data extents it is the inode number of the owning file.
- * For metadata extents this parameter holds the level in the
- * tree of the extent.
- *
- * @offset: For metadata extents the offset is ignored and is currently
- * always passed as 0. For data extents it is the fileoffset
- * this extent belongs to.
- *
 * @extent_op Pointer to a structure holding information necessary when
* updating a tree block's flags
*
*/
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node,
- u64 parent, u64 root_objectid,
- u64 owner, u64 offset,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_path *path;
@@ -1498,6 +1472,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_key key;
u64 bytenr = node->bytenr;
u64 num_bytes = node->num_bytes;
+ u64 owner = btrfs_delayed_ref_owner(node);
+ u64 offset = btrfs_delayed_ref_offset(node);
u64 refs;
int refs_to_add = node->ref_mod;
int ret;
@@ -1508,7 +1484,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
/* this will setup the path even if it fails to insert the back ref */
ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
- parent, root_objectid, owner,
+ node->parent, node->ref_root, owner,
offset, refs_to_add, extent_op);
if ((ret < 0 && ret != -EAGAIN) || !ret)
goto out;
@@ -1531,12 +1507,9 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
/* now insert the actual backref */
if (owner < BTRFS_FIRST_FREE_OBJECTID)
- ret = insert_tree_block_ref(trans, path, bytenr, parent,
- root_objectid);
+ ret = insert_tree_block_ref(trans, path, node, bytenr);
else
- ret = insert_extent_data_ref(trans, path, bytenr, parent,
- root_objectid, owner, offset,
- refs_to_add);
+ ret = insert_extent_data_ref(trans, path, node, bytenr);
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -1569,15 +1542,13 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
bool insert_reserved)
{
int ret = 0;
- struct btrfs_delayed_data_ref *ref;
u64 parent = 0;
u64 flags = 0;
- ref = btrfs_delayed_node_to_data_ref(node);
- trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);
+ trace_run_delayed_data_ref(trans->fs_info, node);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
- parent = ref->parent;
+ parent = node->parent;
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
struct btrfs_key key;
@@ -1588,6 +1559,8 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
.is_inc = true,
.generation = trans->transid,
};
+ u64 owner = btrfs_delayed_ref_owner(node);
+ u64 offset = btrfs_delayed_ref_offset(node);
if (extent_op)
flags |= extent_op->flags_to_set;
@@ -1596,21 +1569,17 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
key.type = BTRFS_EXTENT_ITEM_KEY;
key.offset = node->num_bytes;
- ret = alloc_reserved_file_extent(trans, parent, ref->root,
- flags, ref->objectid,
- ref->offset, &key,
- node->ref_mod, href->owning_root);
+ ret = alloc_reserved_file_extent(trans, parent, node->ref_root,
+ flags, owner, offset, &key,
+ node->ref_mod,
+ href->owning_root);
free_head_ref_squota_rsv(trans->fs_info, href);
if (!ret)
ret = btrfs_record_squota_delta(trans->fs_info, &delta);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, node, parent, ref->root,
- ref->objectid, ref->offset,
- extent_op);
+ ret = __btrfs_inc_extent_ref(trans, node, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, href, node, parent,
- ref->root, ref->objectid,
- ref->offset, extent_op);
+ ret = __btrfs_free_extent(trans, href, node, extent_op);
} else {
BUG();
}
@@ -1732,16 +1701,14 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
{
int ret = 0;
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_delayed_tree_ref *ref;
u64 parent = 0;
u64 ref_root = 0;
- ref = btrfs_delayed_node_to_tree_ref(node);
- trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);
+ trace_run_delayed_tree_ref(trans->fs_info, node);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
- parent = ref->parent;
- ref_root = ref->root;
+ parent = node->parent;
+ ref_root = node->ref_root;
if (unlikely(node->ref_mod != 1)) {
btrfs_err(trans->fs_info,
@@ -1764,11 +1731,9 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
if (!ret)
btrfs_record_squota_delta(fs_info, &delta);
} else if (node->action == BTRFS_ADD_DELAYED_REF) {
- ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
- ref->level, 0, extent_op);
+ ret = __btrfs_inc_extent_ref(trans, node, extent_op);
} else if (node->action == BTRFS_DROP_DELAYED_REF) {
- ret = __btrfs_free_extent(trans, href, node, parent, ref_root,
- ref->level, 0, extent_op);
+ ret = __btrfs_free_extent(trans, href, node, extent_op);
} else {
BUG();
}
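
After this cleanup both run_delayed_data_ref() and run_delayed_tree_ref() funnel into the same two internals, and everything that used to travel as loose scalars is read off the node. Condensed before/after:

	/* Before */
	ret = __btrfs_free_extent(trans, href, node, parent, ref_root,
				  owner_objectid, owner_offset, extent_op);

	/* After: parent, root, owner and offset all come from the node. */
	ret = __btrfs_free_extent(trans, href, node, extent_op);
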
@@ -2292,7 +2257,6 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
{
struct btrfs_delayed_ref_head *head;
struct btrfs_delayed_ref_node *ref;
- struct btrfs_delayed_data_ref *data_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_transaction *cur_trans;
struct rb_node *node;
@@ -2346,6 +2310,9 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
*/
for (node = rb_first_cached(&head->ref_tree); node;
node = rb_next(node)) {
+ u64 ref_owner;
+ u64 ref_offset;
+
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
/* If it's a shared ref we know a cross reference exists */
if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
@@ -2353,15 +2320,15 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
break;
}
- data_ref = btrfs_delayed_node_to_data_ref(ref);
+ ref_owner = btrfs_delayed_ref_owner(ref);
+ ref_offset = btrfs_delayed_ref_offset(ref);
/*
* If our ref doesn't match the one we're currently looking at
* then we have a cross reference.
*/
- if (data_ref->root != root->root_key.objectid ||
- data_ref->objectid != objectid ||
- data_ref->offset != offset) {
+ if (ref->ref_root != btrfs_root_id(root) ||
+ ref_owner != objectid || ref_offset != offset) {
ret = 1;
break;
}
@@ -2454,8 +2421,7 @@ static noinline int check_committed_ref(struct btrfs_root *root,
ref = (struct btrfs_extent_data_ref *)(&iref->offset);
if (btrfs_extent_refs(leaf, ei) !=
btrfs_extent_data_ref_count(leaf, ref) ||
- btrfs_extent_data_ref_root(leaf, ref) !=
- root->root_key.objectid ||
+ btrfs_extent_data_ref_root(leaf, ref) != btrfs_root_id(root) ||
btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
btrfs_extent_data_ref_offset(leaf, ref) != offset)
goto out;
@@ -2492,14 +2458,11 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
int full_backref, int inc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- u64 bytenr;
- u64 num_bytes;
u64 parent;
u64 ref_root;
u32 nritems;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
- struct btrfs_ref generic_ref = { 0 };
bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
int i;
int action;
@@ -2526,6 +2489,12 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
action = BTRFS_DROP_DELAYED_REF;
for (i = 0; i < nritems; i++) {
+ struct btrfs_ref ref = {
+ .action = action,
+ .parent = parent,
+ .ref_root = ref_root,
+ };
+
if (level == 0) {
btrfs_item_key_to_cpu(buf, &key, i);
if (key.type != BTRFS_EXTENT_DATA_KEY)
@@ -2535,35 +2504,33 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
if (btrfs_file_extent_type(buf, fi) ==
BTRFS_FILE_EXTENT_INLINE)
continue;
- bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
- if (bytenr == 0)
+ ref.bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
+ if (ref.bytenr == 0)
continue;
- num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
+ ref.num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
+ ref.owning_root = ref_root;
+
key.offset -= btrfs_file_extent_offset(buf, fi);
- btrfs_init_generic_ref(&generic_ref, action, bytenr,
- num_bytes, parent, ref_root);
- btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
- key.offset, root->root_key.objectid,
- for_reloc);
+ btrfs_init_data_ref(&ref, key.objectid, key.offset,
+ btrfs_root_id(root), for_reloc);
if (inc)
- ret = btrfs_inc_extent_ref(trans, &generic_ref);
+ ret = btrfs_inc_extent_ref(trans, &ref);
else
- ret = btrfs_free_extent(trans, &generic_ref);
+ ret = btrfs_free_extent(trans, &ref);
if (ret)
goto fail;
} else {
- bytenr = btrfs_node_blockptr(buf, i);
- num_bytes = fs_info->nodesize;
- /* We don't know the owning_root, use 0. */
- btrfs_init_generic_ref(&generic_ref, action, bytenr,
- num_bytes, parent, 0);
- btrfs_init_tree_ref(&generic_ref, level - 1, ref_root,
- root->root_key.objectid, for_reloc);
+			/* We don't know the owning_root, so leave it as 0. */
+ ref.bytenr = btrfs_node_blockptr(buf, i);
+ ref.num_bytes = fs_info->nodesize;
+
+ btrfs_init_tree_ref(&ref, level - 1,
+ btrfs_root_id(root), for_reloc);
if (inc)
- ret = btrfs_inc_extent_ref(trans, &generic_ref);
+ ret = btrfs_inc_extent_ref(trans, &ref);
else
- ret = btrfs_free_extent(trans, &generic_ref);
+ ret = btrfs_free_extent(trans, &ref);
if (ret)
goto fail;
}
@@ -3099,9 +3066,7 @@ static int do_free_extent_accounting(struct btrfs_trans_handle *trans,
*/
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *node, u64 parent,
- u64 root_objectid, u64 owner_objectid,
- u64 owner_offset,
+ struct btrfs_delayed_ref_node *node,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *info = trans->fs_info;
@@ -3121,6 +3086,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
u64 refs;
u64 bytenr = node->bytenr;
u64 num_bytes = node->num_bytes;
+ u64 owner_objectid = btrfs_delayed_ref_owner(node);
+ u64 owner_offset = btrfs_delayed_ref_offset(node);
bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);
u64 delayed_ref_root = href->owning_root;
@@ -3146,7 +3113,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
skinny_metadata = false;
ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
- parent, root_objectid, owner_objectid,
+ node->parent, node->ref_root, owner_objectid,
owner_offset);
if (ret == 0) {
/*
@@ -3248,7 +3215,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
} else if (WARN_ON(ret == -ENOENT)) {
abort_and_dump(trans, path,
"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d",
- bytenr, parent, root_objectid, owner_objectid,
+ bytenr, node->parent, node->ref_root, owner_objectid,
owner_offset, path->slots[0]);
goto out;
} else {
@@ -3462,13 +3429,24 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
int ret;
if (root_id != BTRFS_TREE_LOG_OBJECTID) {
- struct btrfs_ref generic_ref = { 0 };
+ struct btrfs_ref generic_ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = buf->start,
+ .num_bytes = buf->len,
+ .parent = parent,
+ .owning_root = btrfs_header_owner(buf),
+ .ref_root = root_id,
+ };
- btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
- buf->start, buf->len, parent,
- btrfs_header_owner(buf));
- btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
- root_id, 0, false);
+ /*
+ * Assert that the extent buffer is not cleared due to
+	 * EXTENT_BUFFER_ZONED_ZEROOUT. Refer to btrfs_clear_buffer_dirty()
+	 * and btree_csum_one_bio() for details.
+ */
+ ASSERT(btrfs_header_bytenr(buf) != 0);
+
+ btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf), 0, false);
btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL);
BUG_ON(ret); /* -ENOMEM */
@@ -3547,11 +3525,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
* tree log blocks never actually go into the extent allocation
* tree, just update pinning info and exit early.
*/
- if ((ref->type == BTRFS_REF_METADATA &&
- ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID) ||
- (ref->type == BTRFS_REF_DATA &&
- ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
- btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
+ if (ref->ref_root == BTRFS_TREE_LOG_OBJECTID) {
+ btrfs_pin_extent(trans, ref->bytenr, ref->num_bytes, 1);
ret = 0;
} else if (ref->type == BTRFS_REF_METADATA) {
ret = btrfs_add_delayed_tree_ref(trans, ref, NULL);
@@ -3559,10 +3534,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
ret = btrfs_add_delayed_data_ref(trans, ref, 0);
}
- if (!((ref->type == BTRFS_REF_METADATA &&
- ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID) ||
- (ref->type == BTRFS_REF_DATA &&
- ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
+ if (ref->ref_root != BTRFS_TREE_LOG_OBJECTID)
btrfs_ref_tree_mod(fs_info, ref);
return ret;
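
Because ->ref_root is no longer buried in the per-type structs, the log-tree special case collapses to a single comparison; were it to need a name, it would be a one-liner like this hypothetical helper:

	static inline bool demo_ref_is_for_log_tree(const struct btrfs_ref *ref)
	{
		return ref->ref_root == BTRFS_TREE_LOG_OBJECTID;
	}
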
@@ -4697,7 +4669,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
bool final_tried = num_bytes == min_alloc_size;
u64 flags;
int ret;
- bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ bool for_treelog = (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID);
bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data);
flags = get_alloc_profile_by_root(root, is_data);
@@ -4891,16 +4863,16 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_extent_inline_ref *iref;
struct btrfs_path *path;
struct extent_buffer *leaf;
- struct btrfs_delayed_tree_ref *ref;
u32 size = sizeof(*extent_item) + sizeof(*iref);
u64 flags = extent_op->flags_to_set;
+ /* The owner of a tree block is the level. */
+ int level = btrfs_delayed_ref_owner(node);
bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
- ref = btrfs_delayed_node_to_tree_ref(node);
-
extent_key.objectid = node->bytenr;
if (skinny_metadata) {
- extent_key.offset = ref->level;
+		extent_key.offset = level;
extent_key.type = BTRFS_METADATA_ITEM_KEY;
} else {
extent_key.offset = node->num_bytes;
@@ -4933,18 +4905,18 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
} else {
block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
- btrfs_set_tree_block_level(leaf, block_info, ref->level);
+ btrfs_set_tree_block_level(leaf, block_info, level);
iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
}
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
btrfs_set_extent_inline_ref_type(leaf, iref,
BTRFS_SHARED_BLOCK_REF_KEY);
- btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
+ btrfs_set_extent_inline_ref_offset(leaf, iref, node->parent);
} else {
btrfs_set_extent_inline_ref_type(leaf, iref,
BTRFS_TREE_BLOCK_REF_KEY);
- btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
+ btrfs_set_extent_inline_ref_offset(leaf, iref, node->ref_root);
}
btrfs_mark_buffer_dirty(trans, leaf);
@@ -4958,19 +4930,20 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins)
{
- struct btrfs_ref generic_ref = { 0 };
- u64 root_objectid = root->root_key.objectid;
- u64 owning_root = root_objectid;
+ struct btrfs_ref generic_ref = {
+ .action = BTRFS_ADD_DELAYED_EXTENT,
+ .bytenr = ins->objectid,
+ .num_bytes = ins->offset,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
- ASSERT(root_objectid != BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(generic_ref.ref_root != BTRFS_TREE_LOG_OBJECTID);
if (btrfs_is_data_reloc_root(root) && is_fstree(root->relocation_src_root))
- owning_root = root->relocation_src_root;
+ generic_ref.owning_root = root->relocation_src_root;
- btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
- ins->objectid, ins->offset, 0, owning_root);
- btrfs_init_data_ref(&generic_ref, root_objectid, owner,
- offset, 0, false);
+ btrfs_init_data_ref(&generic_ref, owner, offset, 0, false);
btrfs_ref_tree_mod(root->fs_info, &generic_ref);
return btrfs_add_delayed_data_ref(trans, &generic_ref, ram_bytes);
@@ -5093,7 +5066,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
*/
btrfs_set_buffer_lockdep_class(lockdep_owner, buf, level);
- __btrfs_tree_lock(buf, nest);
+ btrfs_tree_lock_nested(buf, nest);
btrfs_clear_buffer_dirty(trans, buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
clear_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &buf->bflags);
@@ -5108,7 +5081,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_header_owner(buf, owner);
write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
- if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID) {
buf->log_index = root->log_transid % 2;
/*
* we allow two log transactions at a time, use different
@@ -5149,7 +5122,6 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_block_rsv *block_rsv;
struct extent_buffer *buf;
struct btrfs_delayed_extent_op *extent_op;
- struct btrfs_ref generic_ref = { 0 };
u64 flags = 0;
int ret;
u32 blocksize = fs_info->nodesize;
@@ -5192,6 +5164,14 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
BUG_ON(parent > 0);
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
+ struct btrfs_ref generic_ref = {
+ .action = BTRFS_ADD_DELAYED_EXTENT,
+ .bytenr = ins.objectid,
+ .num_bytes = ins.offset,
+ .parent = parent,
+ .owning_root = owning_root,
+ .ref_root = root_objectid,
+ };
extent_op = btrfs_alloc_delayed_extent_op();
if (!extent_op) {
ret = -ENOMEM;
@@ -5206,10 +5186,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
extent_op->update_flags = true;
extent_op->level = level;
- btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
- ins.objectid, ins.offset, parent, owning_root);
- btrfs_init_tree_ref(&generic_ref, level, root_objectid,
- root->root_key.objectid, false);
+ btrfs_init_tree_ref(&generic_ref, level, btrfs_root_id(root), false);
btrfs_ref_tree_mod(fs_info, &generic_ref);
ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, extent_op);
if (ret)
@@ -5347,8 +5324,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
int ret;
- if (wc->stage == UPDATE_BACKREF &&
- btrfs_header_owner(eb) != root->root_key.objectid)
+ if (wc->stage == UPDATE_BACKREF && btrfs_header_owner(eb) != btrfs_root_id(root))
return 1;
/*
@@ -5422,7 +5398,7 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
ret = lookup_extent_backref(trans, path, &iref, bytenr,
root->fs_info->nodesize, parent,
- root->root_key.objectid, level, 0);
+ btrfs_root_id(root), level, 0);
btrfs_free_path(path);
if (ret == -ENOENT)
return 0;
@@ -5452,11 +5428,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
u64 bytenr;
u64 generation;
- u64 parent;
u64 owner_root = 0;
struct btrfs_tree_parent_check check = { 0 };
struct btrfs_key key;
- struct btrfs_ref ref = { 0 };
struct extent_buffer *next;
int level = wc->level;
int reada = 0;
@@ -5480,7 +5454,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
check.level = level - 1;
check.transid = generation;
- check.owner_root = root->root_key.objectid;
+ check.owner_root = btrfs_root_id(root);
check.has_first_key = true;
btrfs_node_key_to_cpu(path->nodes[level], &check.first_key,
path->slots[level]);
@@ -5488,7 +5462,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
next = find_extent_buffer(fs_info, bytenr);
if (!next) {
next = btrfs_find_create_tree_block(fs_info, bytenr,
- root->root_key.objectid, level - 1);
+ btrfs_root_id(root), level - 1);
if (IS_ERR(next))
return PTR_ERR(next);
reada = 1;
@@ -5573,19 +5547,25 @@ skip:
wc->refs[level - 1] = 0;
wc->flags[level - 1] = 0;
if (wc->stage == DROP_REFERENCE) {
+ struct btrfs_ref ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = bytenr,
+ .num_bytes = fs_info->nodesize,
+ .owning_root = owner_root,
+ .ref_root = btrfs_root_id(root),
+ };
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
- parent = path->nodes[level]->start;
+ ref.parent = path->nodes[level]->start;
} else {
- ASSERT(root->root_key.objectid ==
+ ASSERT(btrfs_root_id(root) ==
btrfs_header_owner(path->nodes[level]));
- if (root->root_key.objectid !=
+ if (btrfs_root_id(root) !=
btrfs_header_owner(path->nodes[level])) {
btrfs_err(root->fs_info,
"mismatched block owner");
ret = -EIO;
goto out_unlock;
}
- parent = 0;
}
/*
@@ -5595,7 +5575,7 @@ skip:
* ->restarted flag.
*/
if (wc->restarted) {
- ret = check_ref_exists(trans, root, bytenr, parent,
+ ret = check_ref_exists(trans, root, bytenr, ref.parent,
level - 1);
if (ret < 0)
goto out_unlock;
@@ -5610,8 +5590,7 @@ skip:
* already accounted them at merge time (replace_path),
* thus we could skip expensive subtree trace here.
*/
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
- need_account) {
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID && need_account) {
ret = btrfs_qgroup_trace_subtree(trans, next,
generation, level - 1);
if (ret) {
@@ -5630,10 +5609,7 @@ skip:
wc->drop_level = level;
find_next_key(path, level, &wc->drop_progress);
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- fs_info->nodesize, parent, owner_root);
- btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid,
- 0, false);
+ btrfs_init_tree_ref(&ref, level - 1, 0, false);
ret = btrfs_free_extent(trans, &ref);
if (ret)
goto out_unlock;
@@ -5724,7 +5700,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
else
ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
- if (is_fstree(root->root_key.objectid)) {
+ if (is_fstree(btrfs_root_id(root))) {
ret = btrfs_qgroup_trace_leaf_items(trans, eb);
if (ret) {
btrfs_err_rl(fs_info,
@@ -5744,12 +5720,12 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (eb == root->node) {
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = eb->start;
- else if (root->root_key.objectid != btrfs_header_owner(eb))
+ else if (btrfs_root_id(root) != btrfs_header_owner(eb))
goto owner_mismatch;
} else {
if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = path->nodes[level + 1]->start;
- else if (root->root_key.objectid !=
+ else if (btrfs_root_id(root) !=
btrfs_header_owner(path->nodes[level + 1]))
goto owner_mismatch;
}
@@ -5763,7 +5739,7 @@ out:
owner_mismatch:
btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
- btrfs_header_owner(eb), root->root_key.objectid);
+ btrfs_header_owner(eb), btrfs_root_id(root));
return -EUCLEAN;
}
@@ -5849,8 +5825,7 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
*/
int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
{
- const bool is_reloc_root = (root->root_key.objectid ==
- BTRFS_TREE_RELOC_OBJECTID);
+ const bool is_reloc_root = (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID);
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_trans_handle *trans;
@@ -5864,7 +5839,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
bool root_dropped = false;
bool unfinished_drop = false;
- btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);
+ btrfs_debug(fs_info, "Drop subvolume %llu", btrfs_root_id(root));
path = btrfs_alloc_path();
if (!path) {
@@ -6062,8 +6037,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
*
* The most common failure here is just -ENOENT.
*/
- btrfs_del_orphan_item(trans, tree_root,
- root->root_key.objectid);
+ btrfs_del_orphan_item(trans, tree_root, btrfs_root_id(root));
}
}
@@ -6125,9 +6099,8 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
int level;
int parent_level;
int ret = 0;
- int wret;
- BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ BUG_ON(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
path = btrfs_alloc_path();
if (!path)
@@ -6161,17 +6134,16 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);
while (1) {
- wret = walk_down_tree(trans, root, path, wc);
- if (wret < 0) {
- ret = wret;
+ ret = walk_down_tree(trans, root, path, wc);
+ if (ret < 0)
break;
- }
- wret = walk_up_tree(trans, root, path, wc, parent_level);
- if (wret < 0)
- ret = wret;
- if (wret != 0)
+ ret = walk_up_tree(trans, root, path, wc, parent_level);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
break;
+ }
}
kfree(wc);
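
A theme running through the extent-tree.c hunks above is replacing the btrfs_init_generic_ref() plus btrfs_init_tree_ref()/btrfs_init_data_ref() call chains with designated initializers on a struct btrfs_ref declared in the narrowest scope that needs it. The standalone sketch below shows why the pattern needs no explicit pre-zeroing; the struct here is a reduced stand-in, not the kernel definition, keeping only the members these hunks touch.

#include <stdio.h>

typedef unsigned long long u64;

/* Reduced stand-in for the kernel's struct btrfs_ref. */
struct btrfs_ref {
	int action;      /* e.g. BTRFS_ADD_DELAYED_EXTENT */
	u64 bytenr;      /* logical start of the extent */
	u64 num_bytes;   /* length of the extent */
	u64 parent;      /* non-zero only for shared (full backref) refs */
	u64 owning_root; /* root that owns the extent's bytes */
	u64 ref_root;    /* root the reference is recorded against */
};

enum { BTRFS_ADD_DELAYED_EXTENT = 1 };

int main(void)
{
	/*
	 * Designated initializers zero every member that is not named,
	 * so 'parent' is implicitly 0 (a non-shared reference) without
	 * a helper that memsets and then overwrites fields one by one.
	 */
	struct btrfs_ref ref = {
		.action = BTRFS_ADD_DELAYED_EXTENT,
		.bytenr = 1ULL << 20,
		.num_bytes = 16ULL << 10,
		.owning_root = 5,
		.ref_root = 5,
	};

	printf("parent defaults to %llu\n", ref.parent);
	return 0;
}
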
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7441245b1ceb..597387e9f040 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -396,15 +396,14 @@ again:
/* then test to make sure it is all still delalloc */
ret = test_range_bit(tree, delalloc_start, delalloc_end,
EXTENT_DELALLOC, cached_state);
+
+ unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
if (!ret) {
- unlock_extent(tree, delalloc_start, delalloc_end,
- &cached_state);
__unlock_for_delalloc(inode, locked_page,
delalloc_start, delalloc_end);
cond_resched();
goto again;
}
- free_extent_state(cached_state);
*start = delalloc_start;
*end = delalloc_end;
out_failed:
@@ -413,9 +412,10 @@ out_failed:
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct page *locked_page,
+ struct extent_state **cached,
u32 clear_bits, unsigned long page_ops)
{
- clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
+ clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
start, end, page_ops);
@@ -667,6 +667,37 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
}
/*
+ * Populate every free slot in a provided array with folios.
+ *
+ * @nr_folios: number of folios to allocate
+ * @folio_array: the array to fill with folios; any existing non-NULL entries in
+ * the array will be skipped
+ * @extra_gfp: the extra GFP flags for the allocation
+ *
+ * Return: 0 if all folios were able to be allocated;
+ * -ENOMEM otherwise; the partially allocated folios are freed and
+ * their array slots reset to NULL
+ */
+int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array,
+ gfp_t extra_gfp)
+{
+ for (int i = 0; i < nr_folios; i++) {
+ if (folio_array[i])
+ continue;
+ folio_array[i] = folio_alloc(GFP_NOFS | extra_gfp, 0);
+ if (!folio_array[i])
+ goto error;
+ }
+ return 0;
+error:
+ for (int i = 0; i < nr_folios; i++) {
+ if (folio_array[i]) {
+ folio_put(folio_array[i]);
+ folio_array[i] = NULL;
+ }
+ }
+ return -ENOMEM;
+}
+
+/*
* Populate every free slot in a provided array with pages.
*
* @nr_pages: number of pages to allocate
@@ -681,31 +712,21 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
gfp_t extra_gfp)
{
+ const gfp_t gfp = GFP_NOFS | extra_gfp;
unsigned int allocated;
for (allocated = 0; allocated < nr_pages;) {
unsigned int last = allocated;
- allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
- nr_pages, page_array);
-
- if (allocated == nr_pages)
- return 0;
-
- /*
- * During this iteration, no page could be allocated, even
- * though alloc_pages_bulk_array() falls back to alloc_page()
- * if it could not bulk-allocate. So we must be out of memory.
- */
- if (allocated == last) {
+ allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+ if (unlikely(allocated == last)) {
+ /* No progress, fail and do cleanup. */
for (int i = 0; i < allocated; i++) {
__free_page(page_array[i]);
page_array[i] = NULL;
}
return -ENOMEM;
}
-
- memalloc_retry_wait(GFP_NOFS);
}
return 0;
}
@@ -1581,7 +1602,7 @@ static void set_btree_ioerr(struct extent_buffer *eb)
* can be no longer dirty nor marked anymore for writeback (if a
* subsequent modification to the extent buffer didn't happen before the
* transaction commit), which makes filemap_fdata[write|wait]_range not
- * able to find the pages tagged with SetPageError at transaction
+ * able to find the pages which contain errors at transaction
* commit time. So if this happens we must abort the transaction,
* otherwise we commit a super block with btree roots that point to
* btree nodes/leafs whose content on disk is invalid - either garbage
@@ -2256,8 +2277,7 @@ next_page:
submit_write_bio(&bio_ctrl, found_error ? ret : 0);
}
-int extent_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
+int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
int ret = 0;
@@ -2277,7 +2297,7 @@ int extent_writepages(struct address_space *mapping,
return ret;
}
-void extent_readahead(struct readahead_control *rac)
+void btrfs_readahead(struct readahead_control *rac)
{
struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
struct page *pagepool[16];
@@ -2335,19 +2355,20 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
* are locked or under IO and drops the related state bits if it is safe
* to drop the page.
*/
-static int try_release_extent_state(struct extent_io_tree *tree,
+static bool try_release_extent_state(struct extent_io_tree *tree,
struct page *page, gfp_t mask)
{
u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1;
- int ret = 1;
+ bool ret;
if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
- ret = 0;
+ ret = false;
} else {
u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
EXTENT_QGROUP_RESERVED);
+ int ret2;
/*
* At this point we can safely clear everything except the
@@ -2355,15 +2376,15 @@ static int try_release_extent_state(struct extent_io_tree *tree,
* The delalloc new bit will be cleared by ordered extent
* completion.
*/
- ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
+ ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
/* If __clear_extent_bit() failed for -ENOMEM reasons,
* we can't allow the release to continue.
*/
- if (ret < 0)
- ret = 0;
+ if (ret2 < 0)
+ ret = false;
else
- ret = 1;
+ ret = true;
}
return ret;
}
@@ -2373,84 +2394,80 @@ static int try_release_extent_state(struct extent_io_tree *tree,
* in the range corresponding to the page, both state records and extent
* map records are removed
*/
-int try_release_extent_mapping(struct page *page, gfp_t mask)
+bool try_release_extent_mapping(struct page *page, gfp_t mask)
{
- struct extent_map *em;
u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1;
- struct btrfs_inode *btrfs_inode = page_to_inode(page);
- struct extent_io_tree *tree = &btrfs_inode->io_tree;
- struct extent_map_tree *map = &btrfs_inode->extent_tree;
-
- if (gfpflags_allow_blocking(mask) &&
- page->mapping->host->i_size > SZ_16M) {
- u64 len;
- while (start <= end) {
- struct btrfs_fs_info *fs_info;
- u64 cur_gen;
-
- len = end - start + 1;
- write_lock(&map->lock);
- em = lookup_extent_mapping(map, start, len);
- if (!em) {
- write_unlock(&map->lock);
- break;
- }
- if ((em->flags & EXTENT_FLAG_PINNED) ||
- em->start != start) {
- write_unlock(&map->lock);
- free_extent_map(em);
- break;
- }
- if (test_range_bit_exists(tree, em->start,
- extent_map_end(em) - 1,
- EXTENT_LOCKED))
- goto next;
- /*
- * If it's not in the list of modified extents, used
- * by a fast fsync, we can remove it. If it's being
- * logged we can safely remove it since fsync took an
- * extra reference on the em.
- */
- if (list_empty(&em->list) ||
- (em->flags & EXTENT_FLAG_LOGGING))
- goto remove_em;
- /*
- * If it's in the list of modified extents, remove it
- * only if its generation is older then the current one,
- * in which case we don't need it for a fast fsync.
- * Otherwise don't remove it, we could be racing with an
- * ongoing fast fsync that could miss the new extent.
- */
- fs_info = btrfs_inode->root->fs_info;
- spin_lock(&fs_info->trans_lock);
- cur_gen = fs_info->generation;
- spin_unlock(&fs_info->trans_lock);
- if (em->generation >= cur_gen)
- goto next;
-remove_em:
- /*
- * We only remove extent maps that are not in the list of
- * modified extents or that are in the list but with a
- * generation lower then the current generation, so there
- * is no need to set the full fsync flag on the inode (it
- * hurts the fsync performance for workloads with a data
- * size that exceeds or is close to the system's memory).
- */
- remove_extent_mapping(map, em);
- /* once for the rb tree */
+ struct btrfs_inode *inode = page_to_inode(page);
+ struct extent_io_tree *io_tree = &inode->io_tree;
+
+ while (start <= end) {
+ const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
+ const u64 len = end - start + 1;
+ struct extent_map_tree *extent_tree = &inode->extent_tree;
+ struct extent_map *em;
+
+ write_lock(&extent_tree->lock);
+ em = lookup_extent_mapping(extent_tree, start, len);
+ if (!em) {
+ write_unlock(&extent_tree->lock);
+ break;
+ }
+ if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
+ write_unlock(&extent_tree->lock);
free_extent_map(em);
+ break;
+ }
+ if (test_range_bit_exists(io_tree, em->start,
+ extent_map_end(em) - 1, EXTENT_LOCKED))
+ goto next;
+ /*
+ * If it's not in the list of modified extents, used by a fast
+ * fsync, we can remove it. If it's being logged we can safely
+ * remove it since fsync took an extra reference on the em.
+ */
+ if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
+ goto remove_em;
+ /*
+ * If it's in the list of modified extents, remove it only if
+ * its generation is older than the current one, in which case
+ * we don't need it for a fast fsync. Otherwise don't remove it,
+ * we could be racing with an ongoing fast fsync that could miss
+ * the new extent.
+ */
+ if (em->generation >= cur_gen)
+ goto next;
+remove_em:
+ /*
+ * We only remove extent maps that are not in the list of
+ * modified extents or that are in the list but with a
+ * generation lower than the current generation, so there is no
+ * need to set the full fsync flag on the inode (it hurts the
+ * fsync performance for workloads with a data size that exceeds
+ * or is close to the system's memory).
+ */
+ remove_extent_mapping(inode, em);
+ /* Once for the inode's extent map tree. */
+ free_extent_map(em);
next:
- start = extent_map_end(em);
- write_unlock(&map->lock);
+ start = extent_map_end(em);
+ write_unlock(&extent_tree->lock);
- /* once for us */
- free_extent_map(em);
+ /* Once for us, for the lookup_extent_mapping() reference. */
+ free_extent_map(em);
- cond_resched(); /* Allow large-extent preemption. */
+ if (need_resched()) {
+ /*
+ * If we need to resched but we can't block just exit
+ * and leave any remaining extent maps.
+ */
+ if (!gfpflags_allow_blocking(mask))
+ break;
+
+ cond_resched();
}
}
- return try_release_extent_state(tree, page, mask);
+ return try_release_extent_state(io_tree, page, mask);
}
struct btrfs_fiemap_entry {
@@ -2783,13 +2800,19 @@ static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *p
goto out;
}
- /* See the comment at fiemap_search_slot() about why we clone. */
- copy_extent_buffer_full(clone, path->nodes[0]);
/*
* Important to preserve the start field, for the optimizations when
* checking if extents are shared (see extent_fiemap()).
+ *
+ * We must set ->start before calling copy_extent_buffer_full(). If we
+ * are on sub-pagesize blocksize, we use ->start to determine the offset
+ * into the folio where our eb exists, and if we update ->start after
+ * the fact then any subsequent reads of the eb may read from a
+ * different offset in the folio than where we originally copied into.
*/
clone->start = path->nodes[0]->start;
+ /* See the comment at fiemap_search_slot() about why we clone. */
+ copy_extent_buffer_full(clone, path->nodes[0]);
slot = path->slots[0];
btrfs_release_path(path);
@@ -4154,7 +4177,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
* The actual zeroout of the buffer will happen later in
* btree_csum_one_bio.
*/
- if (btrfs_is_zoned(fs_info)) {
+ if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
return;
}
@@ -4193,6 +4216,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
num_folios = num_extent_folios(eb);
WARN_ON(atomic_read(&eb->refs) == 0);
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
+ WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
if (!was_dirty) {
bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
@@ -4270,6 +4294,13 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb)
}
}
+static void clear_extent_buffer_reading(struct extent_buffer *eb)
+{
+ clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
+ smp_mb__after_atomic();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
+}
+
static void end_bbio_meta_read(struct btrfs_bio *bbio)
{
struct extent_buffer *eb = bbio->private;
@@ -4278,6 +4309,13 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
struct folio_iter fi;
u32 bio_offset = 0;
+ /*
+ * If the extent buffer is marked UPTODATE before the read operation
+ * completes, other calls to read_extent_buffer_pages() will return
+ * early without waiting for the read to finish, causing data races.
+ */
+ WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
+
eb->read_mirror = bbio->mirror_num;
if (uptodate &&
@@ -4304,9 +4342,7 @@ static void end_bbio_meta_read(struct btrfs_bio *bbio)
bio_offset += len;
}
- clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
- smp_mb__after_atomic();
- wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
+ clear_extent_buffer_reading(eb);
free_extent_buffer(eb);
bio_put(&bbio->bio);
@@ -4333,6 +4369,17 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
goto done;
+ /*
+ * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
+ * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
+ * started and finished reading the same eb. In this case, UPTODATE
+ * will now be set, and we shouldn't read it in again.
+ */
+ if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
+ clear_extent_buffer_reading(eb);
+ return 0;
+ }
+
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
check_buffer_tree_ref(eb);
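
The last extent_io.c hunks close a race between the EXTENT_BUFFER_UPTODATE fast path and the EXTENT_BUFFER_READING ownership bit. Below is a userspace sketch of the same double-check pattern, using C11 atomics in place of the kernel's test_and_set_bit()/clear_bit() and omitting the wake_up_bit() side; the names are illustrative, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>

struct eb_flags {
	atomic_bool uptodate; /* models EXTENT_BUFFER_UPTODATE */
	atomic_bool reading;  /* models EXTENT_BUFFER_READING */
};

/* Returns true only when the caller won the right to issue the read. */
bool begin_read(struct eb_flags *eb)
{
	if (atomic_load(&eb->uptodate))
		return false;                 /* fast path: already populated */

	if (atomic_exchange(&eb->reading, true))
		return false;                 /* another reader is in flight */

	/*
	 * Between the first uptodate check and winning the reading flag,
	 * a concurrent reader may have started and finished. Recheck and
	 * release the flag if there is nothing left to do; this is the
	 * race the hunk above closes in read_extent_buffer_pages().
	 */
	if (atomic_load(&eb->uptodate)) {
		atomic_store(&eb->reading, false);
		return false;
	}
	return true;
}
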
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index e3530d427e1f..dca6b12769ec 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -27,6 +27,7 @@ struct address_space;
struct writeback_control;
struct extent_io_tree;
struct extent_map_tree;
+struct extent_state;
struct btrfs_block_group;
struct btrfs_fs_info;
struct btrfs_inode;
@@ -230,18 +231,17 @@ static inline void extent_changeset_free(struct extent_changeset *changeset)
kfree(changeset);
}
-int try_release_extent_mapping(struct page *page, gfp_t mask);
+bool try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page);
int btrfs_read_folio(struct file *file, struct folio *folio);
void extent_write_locked_range(struct inode *inode, struct page *locked_page,
u64 start, u64 end, struct writeback_control *wbc,
bool pages_dirty);
-int extent_writepages(struct address_space *mapping,
- struct writeback_control *wbc);
+int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc);
int btree_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc);
-void extent_readahead(struct readahead_control *rac);
+void btrfs_readahead(struct readahead_control *rac);
int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len);
int set_folio_extent_mapped(struct folio *folio);
@@ -353,6 +353,7 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
struct page *locked_page,
+ struct extent_state **cached,
u32 bits_to_clear, unsigned long page_ops);
int extent_invalidate_folio(struct extent_io_tree *tree,
struct folio *folio, size_t offset);
@@ -361,6 +362,8 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
gfp_t extra_gfp);
+int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array,
+ gfp_t extra_gfp);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
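
For the allocation helpers declared here, the btrfs_alloc_page_array() rework above keeps retrying as long as a bulk pass makes progress and fails only on a pass that allocates nothing. The following is a userspace analogue under that assumption; bulk_fill() and alloc_array() are illustrative names, with malloc() standing in for alloc_pages_bulk_array().

#include <stdlib.h>

/* Illustrative stand-in for alloc_pages_bulk_array(): fill empty leading
 * slots and return how many leading slots are now populated. */
static unsigned int bulk_fill(void **arr, unsigned int nr)
{
	unsigned int filled;

	for (filled = 0; filled < nr; filled++) {
		if (arr[filled])
			continue;
		arr[filled] = malloc(4096);
		if (!arr[filled])
			break;
	}
	return filled;
}

/* Mirrors the reworked loop: retry while a pass makes progress, and treat
 * a pass that allocates nothing as out of memory, undoing all work. */
int alloc_array(void **arr, unsigned int nr)
{
	unsigned int allocated = 0;

	while (allocated < nr) {
		unsigned int last = allocated;

		allocated = bulk_fill(arr, nr);
		if (allocated == last) {
			for (unsigned int i = 0; i < allocated; i++) {
				free(arr[i]);
				arr[i] = NULL;
			}
			return -1;
		}
	}
	return 0;
}
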
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 347ca13d15a9..744e8952abb0 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -8,6 +8,7 @@
#include "extent_map.h"
#include "compression.h"
#include "btrfs_inode.h"
+#include "disk-io.h"
static struct kmem_cache *extent_map_cache;
@@ -76,6 +77,14 @@ static u64 range_end(u64 start, u64 len)
return start + len;
}
+static void dec_evictable_extent_maps(struct btrfs_inode *inode)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+
+ if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(inode->root)))
+ percpu_counter_dec(&fs_info->evictable_extent_maps);
+}
+
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
struct rb_node **p = &root->rb_root.rb_node;
@@ -223,8 +232,9 @@ static bool mergeable_maps(const struct extent_map *prev, const struct extent_ma
return next->block_start == prev->block_start;
}
-static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
+static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
{
+ struct extent_map_tree *tree = &inode->extent_tree;
struct extent_map *merge = NULL;
struct rb_node *rb;
@@ -252,14 +262,13 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->len += merge->len;
em->block_len += merge->block_len;
em->block_start = merge->block_start;
- em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
- em->mod_start = merge->mod_start;
em->generation = max(em->generation, merge->generation);
em->flags |= EXTENT_FLAG_MERGED;
rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);
free_extent_map(merge);
+ dec_evictable_extent_maps(inode);
}
}
@@ -271,10 +280,10 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->block_len += merge->block_len;
rb_erase_cached(&merge->rb_node, &tree->map);
RB_CLEAR_NODE(&merge->rb_node);
- em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
em->generation = max(em->generation, merge->generation);
em->flags |= EXTENT_FLAG_MERGED;
free_extent_map(merge);
+ dec_evictable_extent_maps(inode);
}
}
@@ -300,7 +309,6 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
struct extent_map_tree *tree = &inode->extent_tree;
int ret = 0;
struct extent_map *em;
- bool prealloc = false;
write_lock(&tree->lock);
em = lookup_extent_mapping(tree, start, len);
@@ -309,7 +317,7 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
btrfs_warn(fs_info,
"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",
btrfs_ino(inode), btrfs_root_id(inode->root),
- start, len, gen);
+ start, start + len, gen);
ret = -ENOENT;
goto out;
}
@@ -318,87 +326,79 @@ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen)
btrfs_warn(fs_info,
"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",
btrfs_ino(inode), btrfs_root_id(inode->root),
- em->start, start, len, gen);
+ em->start, start, start + len, gen);
ret = -EUCLEAN;
goto out;
}
em->generation = gen;
em->flags &= ~EXTENT_FLAG_PINNED;
- em->mod_start = em->start;
- em->mod_len = em->len;
- if (em->flags & EXTENT_FLAG_FILLING) {
- prealloc = true;
- em->flags &= ~EXTENT_FLAG_FILLING;
- }
-
- try_merge_map(tree, em);
-
- if (prealloc) {
- em->mod_start = em->start;
- em->mod_len = em->len;
- }
+ try_merge_map(inode, em);
- free_extent_map(em);
out:
write_unlock(&tree->lock);
+ free_extent_map(em);
return ret;
}
-void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
+void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em)
{
- lockdep_assert_held_write(&tree->lock);
+ lockdep_assert_held_write(&inode->extent_tree.lock);
em->flags &= ~EXTENT_FLAG_LOGGING;
if (extent_map_in_tree(em))
- try_merge_map(tree, em);
+ try_merge_map(inode, em);
}
-static inline void setup_extent_mapping(struct extent_map_tree *tree,
+static inline void setup_extent_mapping(struct btrfs_inode *inode,
struct extent_map *em,
int modified)
{
refcount_inc(&em->refs);
- em->mod_start = em->start;
- em->mod_len = em->len;
ASSERT(list_empty(&em->list));
if (modified)
- list_add(&em->list, &tree->modified_extents);
+ list_add(&em->list, &inode->extent_tree.modified_extents);
else
- try_merge_map(tree, em);
+ try_merge_map(inode, em);
}
/*
- * Add new extent map to the extent tree
+ * Add a new extent map to an inode's extent map tree.
*
- * @tree: tree to insert new map in
+ * @inode: the target inode
* @em: map to insert
* @modified: indicate whether the given @em should be added to the
* modified list, which indicates the extent needs to be logged
*
- * Insert @em into @tree or perform a simple forward/backward merge with
- * existing mappings. The extent_map struct passed in will be inserted
- * into the tree directly, with an additional reference taken, or a
- * reference dropped if the merge attempt was successful.
+ * Insert @em into the @inode's extent map tree or perform a simple
+ * forward/backward merge with existing mappings. The extent_map struct passed
+ * in will be inserted into the tree directly, with an additional reference
+ * taken, or a reference dropped if the merge attempt was successful.
*/
-static int add_extent_mapping(struct extent_map_tree *tree,
+static int add_extent_mapping(struct btrfs_inode *inode,
struct extent_map *em, int modified)
{
- int ret = 0;
+ struct extent_map_tree *tree = &inode->extent_tree;
+ struct btrfs_root *root = inode->root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret;
lockdep_assert_held_write(&tree->lock);
ret = tree_insert(&tree->map, em);
if (ret)
- goto out;
+ return ret;
- setup_extent_mapping(tree, em, modified);
-out:
- return ret;
+ setup_extent_mapping(inode, em, modified);
+
+ if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(root)))
+ percpu_counter_inc(&fs_info->evictable_extent_maps);
+
+ return 0;
}
static struct extent_map *
@@ -464,16 +464,18 @@ struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
}
/*
- * Remove an extent_map from the extent tree.
+ * Remove an extent_map from its inode's extent tree.
*
- * @tree: extent tree to remove from
+ * @inode: the inode the extent map belongs to
* @em: extent map being removed
*
- * Remove @em from @tree. No reference counts are dropped, and no checks
- * are done to see if the range is in use.
+ * Remove @em from the extent tree of @inode. No reference counts are dropped,
+ * and no checks are done to see if the range is in use.
*/
-void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
+void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em)
{
+ struct extent_map_tree *tree = &inode->extent_tree;
+
lockdep_assert_held_write(&tree->lock);
WARN_ON(em->flags & EXTENT_FLAG_PINNED);
@@ -481,13 +483,17 @@ void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
if (!(em->flags & EXTENT_FLAG_LOGGING))
list_del_init(&em->list);
RB_CLEAR_NODE(&em->rb_node);
+
+ dec_evictable_extent_maps(inode);
}
-static void replace_extent_mapping(struct extent_map_tree *tree,
+static void replace_extent_mapping(struct btrfs_inode *inode,
struct extent_map *cur,
struct extent_map *new,
int modified)
{
+ struct extent_map_tree *tree = &inode->extent_tree;
+
lockdep_assert_held_write(&tree->lock);
WARN_ON(cur->flags & EXTENT_FLAG_PINNED);
@@ -497,7 +503,7 @@ static void replace_extent_mapping(struct extent_map_tree *tree,
rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
RB_CLEAR_NODE(&cur->rb_node);
- setup_extent_mapping(tree, new, modified);
+ setup_extent_mapping(inode, new, modified);
}
static struct extent_map *next_extent_map(const struct extent_map *em)
@@ -526,7 +532,7 @@ static struct extent_map *prev_extent_map(struct extent_map *em)
* and an extent that you want to insert, deal with overlap and insert
* the best fitted new extent into the tree.
*/
-static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
+static noinline int merge_extent_mapping(struct btrfs_inode *inode,
struct extent_map *existing,
struct extent_map *em,
u64 map_start)
@@ -560,14 +566,13 @@ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
em->block_start += start_diff;
em->block_len = em->len;
}
- return add_extent_mapping(em_tree, em, 0);
+ return add_extent_mapping(inode, em, 0);
}
/*
- * Add extent mapping into em_tree.
+ * Add extent mapping into an inode's extent map tree.
*
- * @fs_info: the filesystem
- * @em_tree: extent tree into which we want to insert the extent mapping
+ * @inode: target inode
* @em_in: extent we are inserting
* @start: start of the logical range btrfs_get_extent() is requesting
* @len: length of the logical range btrfs_get_extent() is requesting
@@ -575,8 +580,8 @@ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
* Note that @em_in's range may be different from [start, start+len),
* but they must be overlapped.
*
- * Insert @em_in into @em_tree. In case there is an overlapping range, handle
- * the -EEXIST by either:
+ * Insert @em_in into the inode's extent map tree. In case there is an
+ * overlapping range, handle the -EEXIST by either:
* a) Returning the existing extent in @em_in if @start is within the
* existing em.
* b) Merge the existing extent with @em_in passed in.
@@ -584,12 +589,12 @@ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
* Return 0 on success, otherwise -EEXIST.
*
*/
-int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree,
+int btrfs_add_extent_mapping(struct btrfs_inode *inode,
struct extent_map **em_in, u64 start, u64 len)
{
int ret;
struct extent_map *em = *em_in;
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
/*
* Tree-checker should have rejected any inline extent with non-zero
@@ -598,7 +603,7 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
if (em->block_start == EXTENT_MAP_INLINE)
ASSERT(em->start == 0);
- ret = add_extent_mapping(em_tree, em, 0);
+ ret = add_extent_mapping(inode, em, 0);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
* an overlapping map exists in the tree
@@ -606,7 +611,7 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
if (ret == -EEXIST) {
struct extent_map *existing;
- existing = search_extent_mapping(em_tree, start, len);
+ existing = search_extent_mapping(&inode->extent_tree, start, len);
trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
@@ -627,15 +632,14 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
* The existing extent map is the one nearest to
* the [start, start + len) range which overlaps
*/
- ret = merge_extent_mapping(em_tree, existing,
- em, start);
- if (ret) {
+ ret = merge_extent_mapping(inode, existing, em, start);
+ if (WARN_ON(ret)) {
free_extent_map(em);
*em_in = NULL;
- WARN_ONCE(ret,
-"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu\n",
- existing->start, existing->len,
- orig_start, orig_len, start);
+ btrfs_warn(fs_info,
+"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",
+ existing->start, extent_map_end(existing),
+ orig_start, orig_start + orig_len, start);
}
free_extent_map(existing);
}
@@ -650,8 +654,10 @@ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
* if needed. This avoids searching the tree, from the root down to the first
* extent map, before each deletion.
*/
-static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
+static void drop_all_extent_maps_fast(struct btrfs_inode *inode)
{
+ struct extent_map_tree *tree = &inode->extent_tree;
+
write_lock(&tree->lock);
while (!RB_EMPTY_ROOT(&tree->map.rb_root)) {
struct extent_map *em;
@@ -660,7 +666,7 @@ static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
node = rb_first_cached(&tree->map);
em = rb_entry(node, struct extent_map, rb_node);
em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING);
- remove_extent_mapping(tree, em);
+ remove_extent_mapping(inode, em);
free_extent_map(em);
cond_resched_rwlock_write(&tree->lock);
}
@@ -693,7 +699,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
WARN_ON(end < start);
if (end == (u64)-1) {
if (start == 0 && !skip_pinned) {
- drop_all_extent_maps_fast(em_tree);
+ drop_all_extent_maps_fast(inode);
return;
}
len = (u64)-1;
@@ -790,7 +796,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
split->generation = gen;
split->flags = flags;
- replace_extent_mapping(em_tree, em, split, modified);
+ replace_extent_mapping(inode, em, split, modified);
free_extent_map(split);
split = split2;
split2 = NULL;
@@ -817,7 +823,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
split->block_len = em->block_len;
split->orig_start = em->orig_start;
} else {
- const u64 diff = start + len - em->start;
+ const u64 diff = end - em->start;
split->block_len = split->len;
split->block_start += diff;
@@ -831,13 +837,11 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
}
if (extent_map_in_tree(em)) {
- replace_extent_mapping(em_tree, em, split,
- modified);
+ replace_extent_mapping(inode, em, split, modified);
} else {
int ret;
- ret = add_extent_mapping(em_tree, split,
- modified);
+ ret = add_extent_mapping(inode, split, modified);
/* Logic error, shouldn't happen. */
ASSERT(ret == 0);
if (WARN_ON(ret != 0) && modified)
@@ -872,7 +876,7 @@ remove_em:
ASSERT(!split);
btrfs_set_inode_full_sync(inode);
}
- remove_extent_mapping(em_tree, em);
+ remove_extent_mapping(inode, em);
}
/*
@@ -927,7 +931,7 @@ int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
do {
btrfs_drop_extent_map_range(inode, new_em->start, end, false);
write_lock(&tree->lock);
- ret = add_extent_mapping(tree, new_em, modified);
+ ret = add_extent_mapping(inode, new_em, modified);
write_unlock(&tree->lock);
} while (ret == -EEXIST);
@@ -991,7 +995,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
split_pre->flags = flags;
split_pre->generation = em->generation;
- replace_extent_mapping(em_tree, em, split_pre, 1);
+ replace_extent_mapping(inode, em, split_pre, 1);
/*
* Now we only have an extent_map at:
@@ -1008,7 +1012,7 @@ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
split_mid->ram_bytes = split_mid->len;
split_mid->flags = flags;
split_mid->generation = em->generation;
- add_extent_mapping(em_tree, split_mid, 1);
+ add_extent_mapping(inode, split_mid, 1);
/* Once for us */
free_extent_map(em);
@@ -1023,3 +1027,175 @@ out_free_pre:
free_extent_map(split_pre);
return ret;
}
+
+static long btrfs_scan_inode(struct btrfs_inode *inode, long *scanned, long nr_to_scan)
+{
+ const u64 cur_fs_gen = btrfs_get_fs_generation(inode->root->fs_info);
+ struct extent_map_tree *tree = &inode->extent_tree;
+ long nr_dropped = 0;
+ struct rb_node *node;
+
+ /*
+ * Take the mmap lock so that we serialize with the inode logging phase
+ * of fsync because we may need to set the full sync flag on the inode,
+ * in case we have to remove extent maps in the tree's list of modified
+ * extents. If we set the full sync flag in the inode while an fsync is
+ * in progress, we may risk missing new extents because before the flag
+ * is set, fsync decides to only wait for writeback to complete and then
+ * during inode logging it sees the flag set and uses the subvolume tree
+ * to find new extents, which may not be there yet because ordered
+ * extents haven't completed yet.
+ *
+ * We also do a try lock because otherwise we could deadlock. This is
+ * because the shrinker for this filesystem may be invoked while we are
+ * in a path that is holding the mmap lock in write mode. For example in
+ * a reflink operation while COWing an extent buffer, when allocating
+ * pages for a new extent buffer and under memory pressure, the shrinker
+ * may be invoked, and therefore we would deadlock by attempting to read
+ * lock the mmap lock while we are already holding a write lock on it.
+ */
+ if (!down_read_trylock(&inode->i_mmap_lock))
+ return 0;
+
+ write_lock(&tree->lock);
+ node = rb_first_cached(&tree->map);
+ while (node) {
+ struct extent_map *em;
+
+ em = rb_entry(node, struct extent_map, rb_node);
+ node = rb_next(node);
+ (*scanned)++;
+
+ if (em->flags & EXTENT_FLAG_PINNED)
+ goto next;
+
+ /*
+ * If the extent map is in the list of modified extents (new) and its
+ * generation is the same (or is greater than) the current fs
+ * generation, it means it was not yet persisted so we have to
+ * set the full sync flag so that the next fsync will not miss
+ * it.
+ */
+ if (!list_empty(&em->list) && em->generation >= cur_fs_gen)
+ btrfs_set_inode_full_sync(inode);
+
+ remove_extent_mapping(inode, em);
+ trace_btrfs_extent_map_shrinker_remove_em(inode, em);
+ /* Drop the reference for the tree. */
+ free_extent_map(em);
+ nr_dropped++;
+next:
+ if (*scanned >= nr_to_scan)
+ break;
+
+ /*
+ * Restart if we had to reschedule, since any extent maps that were
+ * pinned before may have become unpinned after we released the
+ * lock and took it again.
+ */
+ if (cond_resched_rwlock_write(&tree->lock))
+ node = rb_first_cached(&tree->map);
+ }
+ write_unlock(&tree->lock);
+ up_read(&inode->i_mmap_lock);
+
+ return nr_dropped;
+}
+
+static long btrfs_scan_root(struct btrfs_root *root, long *scanned, long nr_to_scan)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_inode *inode;
+ long nr_dropped = 0;
+ u64 min_ino = fs_info->extent_map_shrinker_last_ino + 1;
+
+ inode = btrfs_find_first_inode(root, min_ino);
+ while (inode) {
+ nr_dropped += btrfs_scan_inode(inode, scanned, nr_to_scan);
+
+ min_ino = btrfs_ino(inode) + 1;
+ fs_info->extent_map_shrinker_last_ino = btrfs_ino(inode);
+ iput(&inode->vfs_inode);
+
+ if (*scanned >= nr_to_scan)
+ break;
+
+ cond_resched();
+ inode = btrfs_find_first_inode(root, min_ino);
+ }
+
+ if (inode) {
+ /*
+ * There are still inodes in this root or we happened to process
+ * the last one and reached the scan limit. In either case set
+ * the current root to this one, so we'll resume from the next
+ * inode if there is one or we will find out this was the last
+ * one and move to the next root.
+ */
+ fs_info->extent_map_shrinker_last_root = btrfs_root_id(root);
+ } else {
+ /*
+ * No more inodes in this root; set extent_map_shrinker_last_ino to 0 so
+ * that when processing the next root we start from its first inode.
+ */
+ fs_info->extent_map_shrinker_last_ino = 0;
+ fs_info->extent_map_shrinker_last_root = btrfs_root_id(root) + 1;
+ }
+
+ return nr_dropped;
+}
+
+long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
+{
+ const u64 start_root_id = fs_info->extent_map_shrinker_last_root;
+ u64 next_root_id = start_root_id;
+ bool cycled = false;
+ long nr_dropped = 0;
+ long scanned = 0;
+
+ if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) {
+ s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+
+ trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr_to_scan, nr);
+ }
+
+ while (scanned < nr_to_scan) {
+ struct btrfs_root *root;
+ unsigned long count;
+
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ count = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+ (void **)&root,
+ (unsigned long)next_root_id, 1);
+ if (count == 0) {
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+ if (start_root_id > 0 && !cycled) {
+ next_root_id = 0;
+ fs_info->extent_map_shrinker_last_root = 0;
+ fs_info->extent_map_shrinker_last_ino = 0;
+ cycled = true;
+ continue;
+ }
+ break;
+ }
+ next_root_id = btrfs_root_id(root) + 1;
+ root = btrfs_grab_root(root);
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+
+ if (!root)
+ continue;
+
+ if (is_fstree(btrfs_root_id(root)))
+ nr_dropped += btrfs_scan_root(root, &scanned, nr_to_scan);
+
+ btrfs_put_root(root);
+ }
+
+ if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) {
+ s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+
+ trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr);
+ }
+
+ return nr_dropped;
+}
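
btrfs_free_extent_maps() persists its position in extent_map_shrinker_last_root and extent_map_shrinker_last_ino so that successive shrinker invocations resume where the previous one stopped, wrapping back to the first root at most once per call. Below is a compact sketch of that resume-and-cycle-once driver, with the roots radix tree replaced by a dense array of ids; shrink_ctx, shrink() and the scan_root() stub are illustrative names, not kernel APIs.

#include <stdbool.h>

struct shrink_ctx {
	unsigned long last_root; /* like extent_map_shrinker_last_root */
	long scanned;
};

/* Stub standing in for btrfs_scan_root(): consume one unit of scan
 * budget per root and report that nothing was dropped. */
static long scan_root(unsigned long root_id, long *scanned, long nr_to_scan)
{
	(void)root_id;
	if (*scanned < nr_to_scan)
		(*scanned)++;
	return 0;
}

long shrink(struct shrink_ctx *ctx, unsigned long nr_roots, long nr_to_scan)
{
	const unsigned long start = ctx->last_root;
	unsigned long next = start;
	bool cycled = false;
	long dropped = 0;

	while (ctx->scanned < nr_to_scan) {
		if (next >= nr_roots) {
			/* Ran off the end: wrap around, but only once,
			 * so a single call cannot loop forever. */
			if (start > 0 && !cycled) {
				next = 0;
				ctx->last_root = 0;
				cycled = true;
				continue;
			}
			break;
		}
		dropped += scan_root(next, &ctx->scanned, nr_to_scan);
		ctx->last_root = ++next;
	}
	return dropped;
}
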
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index c5a098c99cc6..6d587111f73a 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -30,28 +30,77 @@ enum {
ENUM_BIT(EXTENT_FLAG_PREALLOC),
/* Logging this extent */
ENUM_BIT(EXTENT_FLAG_LOGGING),
- /* Filling in a preallocated extent */
- ENUM_BIT(EXTENT_FLAG_FILLING),
/* This em is merged from two or more physically adjacent ems */
ENUM_BIT(EXTENT_FLAG_MERGED),
};
/*
+ * This structure represents file extents and holes.
+ *
+ * Unlike on-disk file extent items, extent maps can be merged to save memory.
+ * This means members only match file extent items before any merging.
+ *
* Keep this structure as compact as possible, as we can have really large
* amounts of allocated extent maps at any time.
*/
struct extent_map {
struct rb_node rb_node;
- /* all of these are in bytes */
+ /* All of these are in bytes. */
+
+ /* File offset matching the offset of a BTRFS_EXTENT_ITEM_KEY key. */
u64 start;
+
+ /*
+ * Length of the file extent.
+ *
+ * For non-inlined file extents it's btrfs_file_extent_item::num_bytes.
+ * For inline extents it's sectorsize, since inline data starts at
+ * offsetof(struct btrfs_file_extent_item, disk_bytenr) thus
+ * btrfs_file_extent_item::num_bytes is not valid.
+ */
u64 len;
- u64 mod_start;
- u64 mod_len;
+
+ /*
+ * The file offset of the original file extent before splitting.
+ *
+ * This is an in-memory only member, matching
+ * extent_map::start - btrfs_file_extent_item::offset for
+ * regular/preallocated extents. EXTENT_MAP_HOLE otherwise.
+ */
u64 orig_start;
+
+ /*
+ * The full on-disk extent length, matching
+ * btrfs_file_extent_item::disk_num_bytes.
+ */
u64 orig_block_len;
+
+ /*
+ * The decompressed size of the whole on-disk extent, matching
+ * btrfs_file_extent_item::ram_bytes.
+ */
u64 ram_bytes;
+
+ /*
+ * The on-disk logical bytenr for the file extent.
+ *
+ * For compressed extents it matches btrfs_file_extent_item::disk_bytenr.
+ * For uncompressed extents it matches
+ * btrfs_file_extent_item::disk_bytenr + btrfs_file_extent_item::offset
+ *
+ * For holes it is EXTENT_MAP_HOLE and for inline extents it is
+ * EXTENT_MAP_INLINE.
+ */
u64 block_start;
+
+ /*
+ * The on-disk length for the file extent.
+ *
+ * For compressed extents it matches btrfs_file_extent_item::disk_num_bytes.
+ * For uncompressed extents it matches extent_map::len.
+ * For holes and inline extents it's -1 and shouldn't be used.
+ */
u64 block_len;
/*
@@ -124,7 +173,7 @@ static inline u64 extent_map_end(const struct extent_map *em)
void extent_map_tree_init(struct extent_map_tree *tree);
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
-void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
+void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em);
int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre,
u64 new_logical);
@@ -133,11 +182,10 @@ void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void __cold extent_map_exit(void);
int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen);
-void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);
+void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em);
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
-int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree,
+int btrfs_add_extent_mapping(struct btrfs_inode *inode,
struct extent_map **em_in, u64 start, u64 len);
void btrfs_drop_extent_map_range(struct btrfs_inode *inode,
u64 start, u64 end,
@@ -145,5 +193,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode,
int btrfs_replace_extent_map_range(struct btrfs_inode *inode,
struct extent_map *new_em,
bool modified);
+long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan);
#endif
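
The comments added to struct extent_map above pin down how its members map to btrfs_file_extent_item fields. A small worked example for a regular uncompressed extent, with made-up values, checks the two relationships stated in those comments (block_start = disk_bytenr + offset, orig_start = start - offset):

#include <assert.h>

int main(void)
{
	/* Made-up values for one BTRFS_FILE_EXTENT_REG item. */
	unsigned long long key_offset  = 1048576; /* file offset (em->start) */
	unsigned long long disk_bytenr = 2097152; /* on-disk extent start */
	unsigned long long offset      = 4096;    /* offset into that extent */

	/* For uncompressed extents: block_start = disk_bytenr + offset. */
	unsigned long long block_start = disk_bytenr + offset;
	/* And orig_start = em->start - offset, per the comment above. */
	unsigned long long orig_start = key_offset - offset;

	assert(block_start == 2101248);
	assert(orig_start == 1044480);
	return 0;
}
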
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index e58fb5347e65..bce95f871750 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -430,8 +430,7 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
memset(csum_dst, 0, csum_size);
count = 1;
- if (inode->root->root_key.objectid ==
- BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ if (btrfs_root_id(inode->root) == BTRFS_DATA_RELOC_TREE_OBJECTID) {
u64 file_offset = bbio->file_offset + bio_offset;
set_extent_bit(&inode->io_tree, file_offset,
@@ -450,9 +449,22 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
return ret;
}
+/*
+ * Search for checksums for a given logical range.
+ *
+ * @root: The root to search for checksums in.
+ * @start: Logical address of target checksum range.
+ * @end: End offset (inclusive) of the target checksum range.
+ * @list: List for adding each checksum that was found.
+ * Can be NULL in case the caller only wants to check whether
+ * there are any checksums for the range.
+ * @nowait: Indicate whether the search must be non-blocking.
+ *
+ * Return < 0 on error, 0 if no checksums were found, or 1 if checksums were
+ * found.
+ */
int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
- struct list_head *list, int search_commit,
- bool nowait)
+ struct list_head *list, bool nowait)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
@@ -460,8 +472,8 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
struct extent_buffer *leaf;
struct btrfs_ordered_sum *sums;
struct btrfs_csum_item *item;
- LIST_HEAD(tmplist);
int ret;
+ bool found_csums = false;
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
IS_ALIGNED(end + 1, fs_info->sectorsize));
@@ -471,11 +483,6 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
return -ENOMEM;
path->nowait = nowait;
- if (search_commit) {
- path->skip_locking = 1;
- path->reada = READA_FORWARD;
- path->search_commit_root = 1;
- }
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
key.offset = start;
@@ -483,7 +490,7 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
- goto fail;
+ goto out;
if (ret > 0 && path->slots[0] > 0) {
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
@@ -518,7 +525,7 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
if (path->slots[0] >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0)
- goto fail;
+ goto out;
if (ret > 0)
break;
leaf = path->nodes[0];
@@ -540,6 +547,10 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
continue;
}
+ found_csums = true;
+ if (!list)
+ goto out;
+
csum_end = min(csum_end, end + 1);
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_csum_item);
@@ -553,7 +564,7 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
GFP_NOFS);
if (!sums) {
ret = -ENOMEM;
- goto fail;
+ goto out;
}
sums->logical = start;
@@ -567,21 +578,24 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
bytes_to_csum_size(fs_info, size));
start += size;
- list_add_tail(&sums->list, &tmplist);
+ list_add_tail(&sums->list, list);
}
path->slots[0]++;
}
- ret = 0;
-fail:
- while (ret < 0 && !list_empty(&tmplist)) {
- sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
- list_del(&sums->list);
- kfree(sums);
+out:
+ btrfs_free_path(path);
+ if (ret < 0) {
+ if (list) {
+ struct btrfs_ordered_sum *tmp_sums;
+
+ list_for_each_entry_safe(sums, tmp_sums, list, list)
+ kfree(sums);
+ }
+
+ return ret;
}
- list_splice_tail(&tmplist, list);
- btrfs_free_path(path);
- return ret;
+ return found_csums ? 1 : 0;
}
/*
@@ -870,8 +884,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
const u32 csum_size = fs_info->csum_size;
u32 blocksize_bits = fs_info->sectorsize_bits;
- ASSERT(root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
- root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID ||
+ btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID);
path = btrfs_alloc_path();
if (!path)
@@ -1171,7 +1185,7 @@ extend_csum:
* search, etc, because log trees are temporary anyway and it
* would only save a few bytes of leaf space.
*/
- if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_TREE_LOG_OBJECTID) {
if (path->slots[0] + 1 >=
btrfs_header_nritems(path->nodes[0])) {
ret = find_next_csum_offset(root, path, &next_offset);
@@ -1265,20 +1279,19 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
struct extent_buffer *leaf = path->nodes[0];
const int slot = path->slots[0];
struct btrfs_key key;
- u64 extent_start, extent_end;
+ u64 extent_start;
u64 bytenr;
u8 type = btrfs_file_extent_type(leaf, fi);
int compress_type = btrfs_file_extent_compression(leaf, fi);
btrfs_item_key_to_cpu(leaf, &key, slot);
extent_start = key.offset;
- extent_end = btrfs_file_extent_end(path);
em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
em->generation = btrfs_file_extent_generation(leaf, fi);
if (type == BTRFS_FILE_EXTENT_REG ||
type == BTRFS_FILE_EXTENT_PREALLOC) {
em->start = extent_start;
- em->len = extent_end - extent_start;
+ em->len = btrfs_file_extent_end(path) - extent_start;
em->orig_start = extent_start -
btrfs_file_extent_offset(leaf, fi);
em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
@@ -1299,9 +1312,12 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
em->flags |= EXTENT_FLAG_PREALLOC;
}
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
+ /* Tree-checker has ensured this. */
+ ASSERT(extent_start == 0);
+
em->block_start = EXTENT_MAP_INLINE;
- em->start = extent_start;
- em->len = extent_end - extent_start;
+ em->start = 0;
+ em->len = fs_info->sectorsize;
/*
* Initialize orig_start and block_len with the same values
* as in inode.c:btrfs_get_extent().
@@ -1313,7 +1329,7 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
btrfs_err(fs_info,
"unknown file extent item type %d, inode %llu, offset %llu, "
"root %llu", type, btrfs_ino(inode), extent_start,
- root->root_key.objectid);
+ btrfs_root_id(root));
}
}
@@ -1334,12 +1350,10 @@ u64 btrfs_file_extent_end(const struct btrfs_path *path)
ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
- end = btrfs_file_extent_ram_bytes(leaf, fi);
- end = ALIGN(key.offset + end, leaf->fs_info->sectorsize);
- } else {
+ if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE)
+ end = leaf->fs_info->sectorsize;
+ else
end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- }
return end;
}
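
After this change, btrfs_lookup_csums_list() reports three outcomes instead of overloading zero: negative for an error, 0 when the range has no checksums, and 1 when it does, with a NULL list turning the call into a pure existence check. A caller-side sketch of that convention follows; lookup_csums() is a hypothetical stand-in, not the kernel function.

#include <stdio.h>

/* Hypothetical stand-in for btrfs_lookup_csums_list() after this change:
 * < 0 is an error, 0 means the range has no checksums, 1 means checksums
 * were found (and appended to 'list' when it is non-NULL). */
static int lookup_csums(unsigned long long start, unsigned long long end,
			void *list)
{
	(void)start;
	(void)end;
	(void)list;
	/* Pretend the whole range is checksummed; with list == NULL the
	 * real function can stop at the first hit instead of collecting. */
	return 1;
}

int main(void)
{
	int ret = lookup_csums(0, 4095, NULL); /* pure existence check */

	if (ret < 0)
		fprintf(stderr, "lookup failed: %d\n", ret);
	else if (ret == 0)
		printf("no checksums in range (e.g. NODATASUM file)\n");
	else
		printf("range has checksums\n");
	return 0;
}
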
diff --git a/fs/btrfs/file-item.h b/fs/btrfs/file-item.h
index 15c05cc0fce6..557dc43d7142 100644
--- a/fs/btrfs/file-item.h
+++ b/fs/btrfs/file-item.h
@@ -68,8 +68,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit,
bool nowait);
int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
- struct list_head *list, int search_commit,
- bool nowait);
+ struct list_head *list, bool nowait);
int btrfs_lookup_csums_bitmap(struct btrfs_root *root, struct btrfs_path *path,
u64 start, u64 end, u8 *csum_buf,
unsigned long *csum_bitmap);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f9d76072398d..e764ac3f22e2 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -128,7 +128,7 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
struct extent_state **cached, bool noreserve)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- int err = 0;
+ int ret = 0;
int i;
u64 num_bytes;
u64 start_pos;
@@ -158,10 +158,10 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
cached);
- err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
+ ret = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
extra_bits, cached);
- if (err)
- return err;
+ if (ret)
+ return ret;
for (i = 0; i < num_pages; i++) {
struct page *p = pages[i];
@@ -206,7 +206,6 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
- struct btrfs_ref ref = { 0 };
struct btrfs_key key;
struct btrfs_key new_key;
u64 ino = btrfs_ino(inode);
@@ -246,7 +245,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
if (args->start >= inode->disk_i_size && !args->replace_extent)
modify_tree = 0;
- update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
+ update_refs = (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
while (1) {
recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino,
@@ -373,15 +372,17 @@ next_slot:
btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0) {
- btrfs_init_generic_ref(&ref,
- BTRFS_ADD_DELAYED_REF,
- disk_bytenr, num_bytes, 0,
- root->root_key.objectid);
- btrfs_init_data_ref(&ref,
- root->root_key.objectid,
- new_key.objectid,
- args->start - extent_offset,
- 0, false);
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = disk_bytenr,
+ .num_bytes = num_bytes,
+ .parent = 0,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
+ btrfs_init_data_ref(&ref, new_key.objectid,
+ args->start - extent_offset,
+ 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -464,15 +465,17 @@ delete_extent_item:
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
- btrfs_init_generic_ref(&ref,
- BTRFS_DROP_DELAYED_REF,
- disk_bytenr, num_bytes, 0,
- root->root_key.objectid);
- btrfs_init_data_ref(&ref,
- root->root_key.objectid,
- key.objectid,
- key.offset - extent_offset, 0,
- false);
+ struct btrfs_ref ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = disk_bytenr,
+ .num_bytes = num_bytes,
+ .parent = 0,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
+ btrfs_init_data_ref(&ref, key.objectid,
+ key.offset - extent_offset,
+ 0, false);
ret = btrfs_free_extent(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
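The two hunks above show the recurring conversion in this patch: btrfs_init_generic_ref() is replaced by a designated initializer for struct btrfs_ref, and btrfs_init_data_ref() loses its root argument. A condensed sketch of the pattern (field names taken from the hunks; the full struct definition is not part of this excerpt):

	struct btrfs_ref ref = {
		.action      = BTRFS_ADD_DELAYED_REF,
		.bytenr      = disk_bytenr,
		.num_bytes   = num_bytes,
		.parent      = 0,
		.owning_root = btrfs_root_id(root),
		.ref_root    = btrfs_root_id(root),
	};

	btrfs_init_data_ref(&ref, ino, offset, 0, false);
	ret = btrfs_inc_extent_ref(trans, &ref);

Unset fields are implicitly zeroed by the initializer, so the explicit .parent = 0 is redundant but keeps the call sites self-documenting.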
@@ -748,10 +751,13 @@ again:
extent_end - split);
btrfs_mark_buffer_dirty(trans, leaf);
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
- num_bytes, 0, root->root_key.objectid);
- btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
- orig_offset, 0, false);
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.bytenr = bytenr;
+ ref.num_bytes = num_bytes;
+ ref.parent = 0;
+ ref.owning_root = btrfs_root_id(root);
+ ref.ref_root = btrfs_root_id(root);
+ btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -774,10 +780,14 @@ again:
other_start = end;
other_end = 0;
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- num_bytes, 0, root->root_key.objectid);
- btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
- 0, false);
+
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.bytenr = bytenr;
+ ref.num_bytes = num_bytes;
+ ref.parent = 0;
+ ref.owning_root = btrfs_root_id(root);
+ ref.ref_root = btrfs_root_id(root);
+ btrfs_init_data_ref(&ref, ino, orig_offset, 0, false);
if (extent_mergeable(leaf, path->slots[0] + 1,
ino, bytenr, orig_offset,
&other_start, &other_end)) {
@@ -915,7 +925,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
unsigned long index = pos >> PAGE_SHIFT;
gfp_t mask = get_prepare_gfp_flags(inode, nowait);
fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
- int err = 0;
+ int ret = 0;
int faili;
for (i = 0; i < num_pages; i++) {
@@ -925,28 +935,28 @@ again:
if (!pages[i]) {
faili = i - 1;
if (nowait)
- err = -EAGAIN;
+ ret = -EAGAIN;
else
- err = -ENOMEM;
+ ret = -ENOMEM;
goto fail;
}
- err = set_page_extent_mapped(pages[i]);
- if (err < 0) {
+ ret = set_page_extent_mapped(pages[i]);
+ if (ret < 0) {
faili = i;
goto fail;
}
if (i == 0)
- err = prepare_uptodate_page(inode, pages[i], pos,
+ ret = prepare_uptodate_page(inode, pages[i], pos,
force_uptodate);
- if (!err && i == num_pages - 1)
- err = prepare_uptodate_page(inode, pages[i],
+ if (!ret && i == num_pages - 1)
+ ret = prepare_uptodate_page(inode, pages[i],
pos + write_bytes, false);
- if (err) {
+ if (ret) {
put_page(pages[i]);
- if (!nowait && err == -EAGAIN) {
- err = 0;
+ if (!nowait && ret == -EAGAIN) {
+ ret = 0;
goto again;
}
faili = i - 1;
@@ -962,7 +972,7 @@ fail:
put_page(pages[faili]);
faili--;
}
- return err;
+ return ret;
}
@@ -1465,7 +1475,7 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
ssize_t written_buffered;
size_t prev_left = 0;
loff_t endbyte;
- ssize_t err;
+ ssize_t ret;
unsigned int ilock_flags = 0;
struct iomap_dio *dio;
@@ -1482,9 +1492,9 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
ilock_flags |= BTRFS_ILOCK_SHARED;
relock:
- err = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
- if (err < 0)
- return err;
+ ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
+ if (ret < 0)
+ return ret;
/* Shared lock cannot be used with security bits set. */
if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
@@ -1493,14 +1503,14 @@ relock:
goto relock;
}
- err = generic_write_checks(iocb, from);
- if (err <= 0) {
+ ret = generic_write_checks(iocb, from);
+ if (ret <= 0) {
btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
- return err;
+ return ret;
}
- err = btrfs_write_check(iocb, from, err);
- if (err < 0) {
+ ret = btrfs_write_check(iocb, from, ret);
+ if (ret < 0) {
btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
goto out;
}
@@ -1552,15 +1562,15 @@ relock:
btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
if (IS_ERR_OR_NULL(dio))
- err = PTR_ERR_OR_ZERO(dio);
+ ret = PTR_ERR_OR_ZERO(dio);
else
- err = iomap_dio_complete(dio);
+ ret = iomap_dio_complete(dio);
/* No increment (+=) because iomap returns a cumulative value. */
- if (err > 0)
- written = err;
+ if (ret > 0)
+ written = ret;
- if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
+ if (iov_iter_count(from) > 0 && (ret == -EFAULT || ret > 0)) {
const size_t left = iov_iter_count(from);
/*
* We have more data left to write. Try to fault in as many as
@@ -1577,7 +1587,7 @@ relock:
* to buffered IO in case we haven't made any progress.
*/
if (left == prev_left) {
- err = -ENOTBLK;
+ ret = -ENOTBLK;
} else {
fault_in_iov_iter_readable(from, left);
prev_left = left;
@@ -1586,10 +1596,10 @@ relock:
}
/*
- * If 'err' is -ENOTBLK or we have not written all data, then it means
+ * If 'ret' is -ENOTBLK or we have not written all data, then it means
* we must fallback to buffered IO.
*/
- if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
+ if ((ret < 0 && ret != -ENOTBLK) || !iov_iter_count(from))
goto out;
buffered:
@@ -1600,14 +1610,14 @@ buffered:
* below, we will block when flushing and waiting for the IO.
*/
if (iocb->ki_flags & IOCB_NOWAIT) {
- err = -EAGAIN;
+ ret = -EAGAIN;
goto out;
}
pos = iocb->ki_pos;
written_buffered = btrfs_buffered_write(iocb, from);
if (written_buffered < 0) {
- err = written_buffered;
+ ret = written_buffered;
goto out;
}
/*
@@ -1615,18 +1625,18 @@ buffered:
* able to read what was just written.
*/
endbyte = pos + written_buffered - 1;
- err = btrfs_fdatawrite_range(inode, pos, endbyte);
- if (err)
+ ret = btrfs_fdatawrite_range(inode, pos, endbyte);
+ if (ret)
goto out;
- err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
- if (err)
+ ret = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
+ if (ret)
goto out;
written += written_buffered;
iocb->ki_pos = pos + written_buffered;
invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
endbyte >> PAGE_SHIFT);
out:
- return err < 0 ? err : written;
+ return ret < 0 ? ret : written;
}
static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
@@ -2029,6 +2039,172 @@ out_release_extents:
goto out;
}
+/*
+ * btrfs_page_mkwrite() is not allowed to change the file size as it gets
+ * called from a page fault handler when a page is first dirtied. Hence we must
+ * be careful to check for EOF conditions here. We set the page up correctly
+ * for a written page which means we get ENOSPC checking when writing into
+ * holes and correct delalloc and unwritten extent mapping on filesystems that
+ * support these features.
+ *
+ * We are not allowed to take the i_mutex here so we have to play games to
+ * protect against truncate races as the page could now be beyond EOF. Because
+ * truncate_setsize() writes the inode size before removing pages, once we have
+ * the page lock we can determine safely if the page is beyond EOF. If it is not
+ * beyond EOF, then the page is guaranteed safe against truncation until we
+ * unlock the page.
+ */
+static vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+ struct folio *folio = page_folio(page);
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct btrfs_ordered_extent *ordered;
+ struct extent_state *cached_state = NULL;
+ struct extent_changeset *data_reserved = NULL;
+ unsigned long zero_start;
+ loff_t size;
+ vm_fault_t ret;
+ int ret2;
+ int reserved = 0;
+ u64 reserved_space;
+ u64 page_start;
+ u64 page_end;
+ u64 end;
+
+ ASSERT(folio_order(folio) == 0);
+
+ reserved_space = PAGE_SIZE;
+
+ sb_start_pagefault(inode->i_sb);
+ page_start = page_offset(page);
+ page_end = page_start + PAGE_SIZE - 1;
+ end = page_end;
+
+ /*
+ * Reserving delalloc space after obtaining the page lock can lead to
+ * deadlock. For example, if a dirty page is locked by this function
+ * and the call to btrfs_delalloc_reserve_space() ends up triggering
+ * dirty page write out, then the btrfs_writepages() function could
+ * end up waiting indefinitely to get a lock on the page currently
+ * being processed by the btrfs_page_mkwrite() function.
+ */
+ ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
+ page_start, reserved_space);
+ if (!ret2) {
+ ret2 = file_update_time(vmf->vma->vm_file);
+ reserved = 1;
+ }
+ if (ret2) {
+ ret = vmf_error(ret2);
+ if (reserved)
+ goto out;
+ goto out_noreserve;
+ }
+
+ /* Make the VM retry the fault. */
+ ret = VM_FAULT_NOPAGE;
+again:
+ down_read(&BTRFS_I(inode)->i_mmap_lock);
+ lock_page(page);
+ size = i_size_read(inode);
+
+ if ((page->mapping != inode->i_mapping) ||
+ (page_start >= size)) {
+ /* Page got truncated out from underneath us. */
+ goto out_unlock;
+ }
+ wait_on_page_writeback(page);
+
+ lock_extent(io_tree, page_start, page_end, &cached_state);
+ ret2 = set_page_extent_mapped(page);
+ if (ret2 < 0) {
+ ret = vmf_error(ret2);
+ unlock_extent(io_tree, page_start, page_end, &cached_state);
+ goto out_unlock;
+ }
+
+ /*
+ * We can't set the delalloc bits if there are pending ordered
+ * extents. Drop our locks and wait for them to finish.
+ */
+ ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE);
+ if (ordered) {
+ unlock_extent(io_tree, page_start, page_end, &cached_state);
+ unlock_page(page);
+ up_read(&BTRFS_I(inode)->i_mmap_lock);
+ btrfs_start_ordered_extent(ordered);
+ btrfs_put_ordered_extent(ordered);
+ goto again;
+ }
+
+ if (page->index == ((size - 1) >> PAGE_SHIFT)) {
+ reserved_space = round_up(size - page_start, fs_info->sectorsize);
+ if (reserved_space < PAGE_SIZE) {
+ end = page_start + reserved_space - 1;
+ btrfs_delalloc_release_space(BTRFS_I(inode),
+ data_reserved, page_start,
+ PAGE_SIZE - reserved_space, true);
+ }
+ }
+
+ /*
+ * page_mkwrite gets called when the page is first dirtied after it's
+ * faulted in, but write(2) could also dirty a page and set delalloc
+ * bits. In that case, for space accounting reasons, we still need to
+ * clear any delalloc bits within this page range, since we had to
+ * reserve data and metadata space before lock_page() (see the comments
+ * above).
+ */
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
+ EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
+ EXTENT_DEFRAG, &cached_state);
+
+ ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
+ &cached_state);
+ if (ret2) {
+ unlock_extent(io_tree, page_start, page_end, &cached_state);
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+
+ /* Page is wholly or partially inside EOF. */
+ if (page_start + PAGE_SIZE > size)
+ zero_start = offset_in_page(size);
+ else
+ zero_start = PAGE_SIZE;
+
+ if (zero_start != PAGE_SIZE)
+ memzero_page(page, zero_start, PAGE_SIZE - zero_start);
+
+ btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
+ btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
+ btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
+
+ btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
+
+ unlock_extent(io_tree, page_start, page_end, &cached_state);
+ up_read(&BTRFS_I(inode)->i_mmap_lock);
+
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+ sb_end_pagefault(inode->i_sb);
+ extent_changeset_free(data_reserved);
+ return VM_FAULT_LOCKED;
+
+out_unlock:
+ unlock_page(page);
+ up_read(&BTRFS_I(inode)->i_mmap_lock);
+out:
+ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
+ btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
+ reserved_space, (ret != 0));
+out_noreserve:
+ sb_end_pagefault(inode->i_sb);
+ extent_changeset_free(data_reserved);
+ return ret;
+}
+
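The handler above keeps two result variables because helpers such as btrfs_delalloc_reserve_space() return a negative errno while a fault handler must return a vm_fault_t; vmf_error() bridges the two, mapping -ENOMEM to VM_FAULT_OOM and most other errors to VM_FAULT_SIGBUS. A reduced sketch of that split (example_reserve() is a hypothetical helper):

	static vm_fault_t example_mkwrite(struct vm_fault *vmf)
	{
		int err;

		err = example_reserve(vmf);	/* returns 0 or a negative errno */
		if (err)
			return vmf_error(err);	/* errno -> vm_fault_t */

		return VM_FAULT_LOCKED;		/* page stays locked and dirty */
	}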
static const struct vm_operations_struct btrfs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
@@ -2258,7 +2434,6 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_key key;
int slot;
- struct btrfs_ref ref = { 0 };
int ret;
if (replace_len == 0)
@@ -2314,15 +2489,17 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
extent_info->qgroup_reserved,
&key);
} else {
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = extent_info->disk_offset,
+ .num_bytes = extent_info->disk_len,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
u64 ref_offset;
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
- extent_info->disk_offset,
- extent_info->disk_len, 0,
- root->root_key.objectid);
ref_offset = extent_info->file_offset - extent_info->data_offset;
- btrfs_init_data_ref(&ref, root->root_key.objectid,
- btrfs_ino(inode), ref_offset, 0, false);
+ btrfs_init_data_ref(&ref, btrfs_ino(inode), ref_offset, 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
}
@@ -3719,8 +3896,7 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
{
int ret;
- filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
- FMODE_CAN_ODIRECT;
+ filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
ret = fsverity_file_open(inode, filp);
if (ret)
@@ -3850,6 +4026,7 @@ const struct file_operations btrfs_file_operations = {
.compat_ioctl = btrfs_compat_ioctl,
#endif
.remap_file_range = btrfs_remap_file_range,
+ .fop_flags = FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
};
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
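The last two file.c hunks above are related: advertising buffered async read/write support moves from per-file f_mode bits set in every ->open() call to a single fop_flags declaration on the file_operations. A sketch of the resulting shape (members other than the relevant ones omitted):

	const struct file_operations example_fops = {
		.open      = btrfs_file_open,	/* now sets only FMODE_NOWAIT | FMODE_CAN_ODIRECT */
		.fop_flags = FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC,
	};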
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index c8a05d5eb9cb..3ab8dea5036b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1911,9 +1911,9 @@ static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
ctl->free_space -= bytes;
}
-static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
- struct btrfs_free_space *info, u64 offset,
- u64 bytes)
+static void btrfs_bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
+ struct btrfs_free_space *info, u64 offset,
+ u64 bytes)
{
unsigned long start, count, end;
int extent_delta = 1;
@@ -2249,7 +2249,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
bytes_to_set = min(end - offset, bytes);
- bitmap_set_bits(ctl, info, offset, bytes_to_set);
+ btrfs_bitmap_set_bits(ctl, info, offset, bytes_to_set);
return bytes_to_set;
diff --git a/fs/btrfs/fs.h b/fs/btrfs/fs.h
index 93f5c57ea4e3..89f0650631cd 100644
--- a/fs/btrfs/fs.h
+++ b/fs/btrfs/fs.h
@@ -9,7 +9,6 @@
#include <linux/compiler.h>
#include <linux/math.h>
#include <linux/atomic.h>
-#include <linux/blkdev.h>
#include <linux/percpu_counter.h>
#include <linux/completion.h>
#include <linux/lockdep.h>
@@ -630,6 +629,10 @@ struct btrfs_fs_info {
s32 dirty_metadata_batch;
s32 delalloc_batch;
+ struct percpu_counter evictable_extent_maps;
+ u64 extent_map_shrinker_last_root;
+ u64 extent_map_shrinker_last_ino;
+
/* Protected by 'trans_lock'. */
struct list_head dirty_cowonly_roots;
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c
index 9c1394c0a6d7..84a94d19b22c 100644
--- a/fs/btrfs/inode-item.c
+++ b/fs/btrfs/inode-item.c
@@ -670,16 +670,18 @@ delete:
}
if (del_item && extent_start != 0 && !control->skip_ref_updates) {
- struct btrfs_ref ref = { 0 };
+ struct btrfs_ref ref = {
+ .action = BTRFS_DROP_DELAYED_REF,
+ .bytenr = extent_start,
+ .num_bytes = extent_num_bytes,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_header_owner(leaf),
+ };
bytes_deleted += extent_num_bytes;
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
- extent_start, extent_num_bytes, 0,
- root->root_key.objectid);
- btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
- control->ino, extent_offset,
- root->root_key.objectid, false);
+ btrfs_init_data_ref(&ref, control->ino, extent_offset,
+ btrfs_root_id(root), false);
ret = btrfs_free_extent(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 37701531eeb1..753db965f7c0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -254,7 +254,7 @@ static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off
btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
- inode->root->root_key.objectid, btrfs_ino(inode), file_off,
+ btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
CSUM_FMT_VALUE(csum_size, csum),
CSUM_FMT_VALUE(csum_size, csum_expected),
mirror_num);
@@ -264,7 +264,7 @@ static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off
logical += file_off;
btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
- inode->root->root_key.objectid,
+ btrfs_root_id(inode->root),
btrfs_ino(inode), file_off, logical,
CSUM_FMT_VALUE(csum_size, csum),
CSUM_FMT_VALUE(csum_size, csum_expected),
@@ -331,15 +331,15 @@ static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
const u32 csum_size = root->fs_info->csum_size;
/* For data reloc tree, it's better to do a backref lookup instead. */
- if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID)
return print_data_reloc_error(inode, logical_start, csum,
csum_expected, mirror_num);
/* Output without objectid, which is more meaningful */
- if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) {
+ if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
- root->root_key.objectid, btrfs_ino(inode),
+ btrfs_root_id(root), btrfs_ino(inode),
logical_start,
CSUM_FMT_VALUE(csum_size, csum),
CSUM_FMT_VALUE(csum_size, csum_expected),
@@ -347,7 +347,7 @@ static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
} else {
btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
- root->root_key.objectid, btrfs_ino(inode),
+ btrfs_root_id(root), btrfs_ino(inode),
logical_start,
CSUM_FMT_VALUE(csum_size, csum),
CSUM_FMT_VALUE(csum_size, csum_expected),
@@ -512,12 +512,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, bool extent_inserted,
size_t size, size_t compressed_size,
int compress_type,
- struct page **compressed_pages,
+ struct folio *compressed_folio,
bool update_i_size)
{
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct page *page = NULL;
+ const u32 sectorsize = trans->fs_info->sectorsize;
char *kaddr;
unsigned long ptr;
struct btrfs_file_extent_item *ei;
@@ -525,10 +526,23 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
size_t cur_size = size;
u64 i_size;
- ASSERT((compressed_size > 0 && compressed_pages) ||
- (compressed_size == 0 && !compressed_pages));
+ /*
+ * The decompressed size must still be no larger than a sector. Under a
+ * heavy race we can have size == 0 passed in, but that shouldn't be a
+ * big deal and we can continue the insertion.
+ */
+ ASSERT(size <= sectorsize);
+
+ /*
+ * The compressed size also needs to be no larger than a sector.
+ * That's also why we only need one page as the parameter.
+ */
+ if (compressed_folio)
+ ASSERT(compressed_size <= sectorsize);
+ else
+ ASSERT(compressed_size == 0);
- if (compressed_size && compressed_pages)
+ if (compressed_size && compressed_folio)
cur_size = compressed_size;
if (!extent_inserted) {
@@ -556,21 +570,10 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
ptr = btrfs_file_extent_inline_start(ei);
if (compress_type != BTRFS_COMPRESS_NONE) {
- struct page *cpage;
- int i = 0;
- while (compressed_size > 0) {
- cpage = compressed_pages[i];
- cur_size = min_t(unsigned long, compressed_size,
- PAGE_SIZE);
-
- kaddr = kmap_local_page(cpage);
- write_extent_buffer(leaf, kaddr, ptr, cur_size);
- kunmap_local(kaddr);
+ kaddr = kmap_local_folio(compressed_folio, 0);
+ write_extent_buffer(leaf, kaddr, ptr, compressed_size);
+ kunmap_local(kaddr);
- i++;
- ptr += cur_size;
- compressed_size -= cur_size;
- }
btrfs_set_file_extent_compression(leaf, ei,
compress_type);
} else {
@@ -611,17 +614,62 @@ fail:
return ret;
}
+static bool can_cow_file_range_inline(struct btrfs_inode *inode,
+ u64 offset, u64 size,
+ size_t compressed_size)
+{
+ struct btrfs_fs_info *fs_info = inode->root->fs_info;
+ u64 data_len = (compressed_size ?: size);
+
+ /* Inline extents must start at offset 0. */
+ if (offset != 0)
+ return false;
+
+ /*
+ * Due to the page size limit, for subpage we can only trigger the
+ * writeback for the dirty sectors of page, that means data writeback
+ * is doing more writeback than what we want.
+ *
+ * This is especially unexpected for some call sites like fallocate,
+ * where we only increase i_size after everything is done.
+ * This means we can trigger inline extent even if we didn't want to.
+ * So here we skip inline extent creation completely.
+ */
+ if (fs_info->sectorsize != PAGE_SIZE)
+ return false;
+
+ /* Inline extents are limited to sectorsize. */
+ if (size > fs_info->sectorsize)
+ return false;
+
+ /* We cannot exceed the maximum inline data size. */
+ if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
+ return false;
+
+ /* We cannot exceed the user specified max_inline size. */
+ if (data_len > fs_info->max_inline)
+ return false;
+
+ /* Inline extents must be the entirety of the file. */
+ if (size < i_size_read(&inode->vfs_inode))
+ return false;
+
+ return true;
+}
/*
* conditionally insert an inline extent into the file. This
* does the checks required to make sure the data is small enough
* to fit as an inline extent.
+ *
+ * If used directly, the caller must have already checked that we're allowed
+ * to cow the range by getting true from can_cow_file_range_inline().
*/
-static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
- size_t compressed_size,
- int compress_type,
- struct page **compressed_pages,
- bool update_i_size)
+static noinline int __cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
+ u64 size, size_t compressed_size,
+ int compress_type,
+ struct folio *compressed_folio,
+ bool update_i_size)
{
struct btrfs_drop_extents_args drop_args = { 0 };
struct btrfs_root *root = inode->root;
@@ -631,18 +679,6 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
int ret;
struct btrfs_path *path;
- /*
- * We can create an inline extent if it ends at or beyond the current
- * i_size, is no larger than a sector (decompressed), and the (possibly
- * compressed) data fits in a leaf and the configured maximum inline
- * size.
- */
- if (size < i_size_read(&inode->vfs_inode) ||
- size > fs_info->sectorsize ||
- data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
- data_len > fs_info->max_inline)
- return 1;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -668,7 +704,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
size, compressed_size, compress_type,
- compressed_pages, update_i_size);
+ compressed_folio, update_i_size);
if (ret && ret != -ENOSPC) {
btrfs_abort_transaction(trans, ret);
goto out;
@@ -701,12 +737,44 @@ out:
return ret;
}
+static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
+ u64 end,
+ size_t compressed_size,
+ int compress_type,
+ struct folio *compressed_folio,
+ bool update_i_size)
+{
+ struct extent_state *cached = NULL;
+ unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
+ EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
+ u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
+ int ret;
+
+ if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
+ return 1;
+
+ lock_extent(&inode->io_tree, offset, end, &cached);
+ ret = __cow_file_range_inline(inode, offset, size, compressed_size,
+ compress_type, compressed_folio,
+ update_i_size);
+ if (ret > 0) {
+ unlock_extent(&inode->io_tree, offset, end, &cached);
+ return ret;
+ }
+
+ extent_clear_unlock_delalloc(inode, offset, end, NULL, &cached,
+ clear_flags,
+ PAGE_UNLOCK | PAGE_START_WRITEBACK |
+ PAGE_END_WRITEBACK);
+ return ret;
+}
+
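The wrapper above now owns the extent locking: it unlocks and returns in the "not inlinable" (ret > 0) case, and otherwise leaves all page and extent-state cleanup to extent_clear_unlock_delalloc(). The caller contract this implies, condensed from the cow_file_range() hunk further below:

	ret = cow_file_range_inline(inode, start, end, 0,
				    BTRFS_COMPRESS_NONE, NULL, false);
	if (ret <= 0) {
		/*
		 * ret == 0: inline extent created, pages unlocked and
		 * writeback finished; ret < 0: error, cleanup already done.
		 * Either way this range needs no further work.
		 */
		if (ret == 0)
			ret = 1;
		goto done;
	}
	/* ret > 0: could not inline, fall through to the regular COW path. */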
struct async_extent {
u64 start;
u64 ram_size;
u64 compressed_size;
- struct page **pages;
- unsigned long nr_pages;
+ struct folio **folios;
+ unsigned long nr_folios;
int compress_type;
struct list_head list;
};
@@ -731,8 +799,8 @@ struct async_cow {
static noinline int add_async_extent(struct async_chunk *cow,
u64 start, u64 ram_size,
u64 compressed_size,
- struct page **pages,
- unsigned long nr_pages,
+ struct folio **folios,
+ unsigned long nr_folios,
int compress_type)
{
struct async_extent *async_extent;
@@ -743,8 +811,8 @@ static noinline int add_async_extent(struct async_chunk *cow,
async_extent->start = start;
async_extent->ram_size = ram_size;
async_extent->compressed_size = compressed_size;
- async_extent->pages = pages;
- async_extent->nr_pages = nr_pages;
+ async_extent->folios = folios;
+ async_extent->nr_folios = nr_folios;
async_extent->compress_type = compress_type;
list_add_tail(&async_extent->list, &cow->extents);
return 0;
@@ -848,8 +916,8 @@ static void compress_file_range(struct btrfs_work *work)
u64 actual_end;
u64 i_size;
int ret = 0;
- struct page **pages;
- unsigned long nr_pages;
+ struct folio **folios;
+ unsigned long nr_folios;
unsigned long total_compressed = 0;
unsigned long total_in = 0;
unsigned int poff;
@@ -879,9 +947,9 @@ static void compress_file_range(struct btrfs_work *work)
barrier();
actual_end = min_t(u64, i_size, end + 1);
again:
- pages = NULL;
- nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
- nr_pages = min_t(unsigned long, nr_pages, BTRFS_MAX_COMPRESSED_PAGES);
+ folios = NULL;
+ nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
+ nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES);
/*
* we don't want to send crud past the end of i_size through
@@ -930,8 +998,8 @@ again:
if (!inode_need_compress(inode, start, end))
goto cleanup_and_bail_uncompressed;
- pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
- if (!pages) {
+ folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
+ if (!folios) {
/*
* Memory allocation failure is not a fatal error, we can fall
* back to uncompressed code.
@@ -945,9 +1013,9 @@ again:
compress_type = inode->prop_compress;
/* Compression level is applied here. */
- ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4),
- mapping, start, pages, &nr_pages, &total_in,
- &total_compressed);
+ ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4),
+ mapping, start, folios, &nr_folios, &total_in,
+ &total_compressed);
if (ret)
goto mark_incompressible;
@@ -957,7 +1025,7 @@ again:
*/
poff = offset_in_page(total_compressed);
if (poff)
- memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff);
+ folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff);
/*
* Try to create an inline extent.
@@ -968,43 +1036,16 @@ again:
* Check cow_file_range() for why we don't even try to create inline
* extent for the subpage case.
*/
- if (start == 0 && fs_info->sectorsize == PAGE_SIZE) {
- if (total_in < actual_end) {
- ret = cow_file_range_inline(inode, actual_end, 0,
- BTRFS_COMPRESS_NONE, NULL,
- false);
- } else {
- ret = cow_file_range_inline(inode, actual_end,
- total_compressed,
- compress_type, pages,
- false);
- }
- if (ret <= 0) {
- unsigned long clear_flags = EXTENT_DELALLOC |
- EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
- EXTENT_DO_ACCOUNTING;
-
- if (ret < 0)
- mapping_set_error(mapping, -EIO);
-
- /*
- * inline extent creation worked or returned error,
- * we don't need to create any more async work items.
- * Unlock and free up our temp pages.
- *
- * We use DO_ACCOUNTING here because we need the
- * delalloc_release_metadata to be done _after_ we drop
- * our outstanding extent for clearing delalloc for this
- * range.
- */
- extent_clear_unlock_delalloc(inode, start, end,
- NULL,
- clear_flags,
- PAGE_UNLOCK |
- PAGE_START_WRITEBACK |
- PAGE_END_WRITEBACK);
- goto free_pages;
- }
+ if (total_in < actual_end)
+ ret = cow_file_range_inline(inode, start, end, 0,
+ BTRFS_COMPRESS_NONE, NULL, false);
+ else
+ ret = cow_file_range_inline(inode, start, end, total_compressed,
+ compress_type, folios[0], false);
+ if (ret <= 0) {
+ if (ret < 0)
+ mapping_set_error(mapping, -EIO);
+ goto free_pages;
}
/*
@@ -1026,8 +1067,8 @@ again:
* The async work queues will take care of doing actual allocation on
* disk for these compressed pages, and will submit the bios.
*/
- ret = add_async_extent(async_chunk, start, total_in, total_compressed, pages,
- nr_pages, compress_type);
+ ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
+ nr_folios, compress_type);
BUG_ON(ret);
if (start + total_in < end) {
start += total_in;
@@ -1044,12 +1085,12 @@ cleanup_and_bail_uncompressed:
BTRFS_COMPRESS_NONE);
BUG_ON(ret);
free_pages:
- if (pages) {
- for (i = 0; i < nr_pages; i++) {
- WARN_ON(pages[i]->mapping);
- btrfs_free_compr_page(pages[i]);
+ if (folios) {
+ for (i = 0; i < nr_folios; i++) {
+ WARN_ON(folios[i]->mapping);
+ btrfs_free_compr_folio(folios[i]);
}
- kfree(pages);
+ kfree(folios);
}
}
@@ -1057,16 +1098,16 @@ static void free_async_extent_pages(struct async_extent *async_extent)
{
int i;
- if (!async_extent->pages)
+ if (!async_extent->folios)
return;
- for (i = 0; i < async_extent->nr_pages; i++) {
- WARN_ON(async_extent->pages[i]->mapping);
- btrfs_free_compr_page(async_extent->pages[i]);
+ for (i = 0; i < async_extent->nr_folios; i++) {
+ WARN_ON(async_extent->folios[i]->mapping);
+ btrfs_free_compr_folio(async_extent->folios[i]);
}
- kfree(async_extent->pages);
- async_extent->nr_pages = 0;
- async_extent->pages = NULL;
+ kfree(async_extent->folios);
+ async_extent->nr_folios = 0;
+ async_extent->folios = NULL;
}
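These hunks convert the compression path's scratch buffers from page arrays to folio arrays wholesale. The allocation and compression side, condensed from the compress_file_range() hunks above (same identifiers and labels as in that function):

	folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
	if (!folios)
		goto cleanup_and_bail_uncompressed;	/* ENOMEM just disables compression */

	ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4),
				    mapping, start, folios, &nr_folios,
				    &total_in, &total_compressed);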
static void submit_uncompressed_range(struct btrfs_inode *inode,
@@ -1113,6 +1154,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
struct btrfs_ordered_extent *ordered;
struct btrfs_key ins;
struct page *locked_page = NULL;
+ struct extent_state *cached = NULL;
struct extent_map *em;
int ret = 0;
u64 start = async_extent->start;
@@ -1132,7 +1174,6 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
if (!(start >= locked_page_end || end <= locked_page_start))
locked_page = async_chunk->locked_page;
}
- lock_extent(io_tree, start, end, NULL);
if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
submit_uncompressed_range(inode, async_extent, locked_page);
@@ -1145,15 +1186,17 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
0, *alloc_hint, &ins, 1, 1);
if (ret) {
/*
- * Here we used to try again by going back to non-compressed
- * path for ENOSPC. But we can't reserve space even for
- * compressed size, how could it work for uncompressed size
- * which requires larger size? So here we directly go error
- * path.
+ * We can't reserve contiguous space for the compressed size.
+ * Unlikely, but it's possible that we could have enough
+ * non-contiguous space for the uncompressed size instead. So
+ * fall back to uncompressed.
*/
- goto out_free;
+ submit_uncompressed_range(inode, async_extent, locked_page);
+ goto done;
}
+ lock_extent(io_tree, start, end, &cached);
+
/* Here we're doing allocation and writeback of the compressed pages */
em = create_io_em(inode, start,
async_extent->ram_size, /* len */
@@ -1187,11 +1230,11 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
/* Clear dirty, set writeback and unlock the pages. */
extent_clear_unlock_delalloc(inode, start, end,
- NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
+ NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
PAGE_UNLOCK | PAGE_START_WRITEBACK);
btrfs_submit_compressed_write(ordered,
- async_extent->pages, /* compressed_pages */
- async_extent->nr_pages,
+ async_extent->folios, /* compressed_folios */
+ async_extent->nr_folios,
async_chunk->write_flags, true);
*alloc_hint = ins.objectid + ins.offset;
done:
@@ -1203,10 +1246,10 @@ done:
out_free_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
-out_free:
mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
extent_clear_unlock_delalloc(inode, start, end,
- NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
+ NULL, &cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_DELALLOC_NEW |
EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
@@ -1216,7 +1259,7 @@ out_free:
kthread_associate_blkcg(NULL);
btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
- root->root_key.objectid, btrfs_ino(inode), start,
+ btrfs_root_id(root), btrfs_ino(inode), start,
async_extent->ram_size, ret);
kfree(async_extent);
}
@@ -1288,6 +1331,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_state *cached = NULL;
u64 alloc_hint = 0;
u64 orig_start = start;
u64 num_bytes;
@@ -1313,53 +1357,21 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
inode_should_defrag(inode, start, end, num_bytes, SZ_64K);
- /*
- * Due to the page size limit, for subpage we can only trigger the
- * writeback for the dirty sectors of page, that means data writeback
- * is doing more writeback than what we want.
- *
- * This is especially unexpected for some call sites like fallocate,
- * where we only increase i_size after everything is done.
- * This means we can trigger inline extent even if we didn't want to.
- * So here we skip inline extent creation completely.
- */
- if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) {
- u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode),
- end + 1);
-
+ if (!no_inline) {
/* lets try to make an inline extent */
- ret = cow_file_range_inline(inode, actual_end, 0,
+ ret = cow_file_range_inline(inode, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
- if (ret == 0) {
- /*
- * We use DO_ACCOUNTING here because we need the
- * delalloc_release_metadata to be run _after_ we drop
- * our outstanding extent for clearing delalloc for this
- * range.
- */
- extent_clear_unlock_delalloc(inode, start, end,
- locked_page,
- EXTENT_LOCKED | EXTENT_DELALLOC |
- EXTENT_DELALLOC_NEW | EXTENT_DEFRAG |
- EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
- PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
+ if (ret <= 0) {
/*
- * locked_page is locked by the caller of
- * writepage_delalloc(), not locked by
- * __process_pages_contig().
- *
- * We can't let __process_pages_contig() to unlock it,
- * as it doesn't have any subpage::writers recorded.
+ * We succeeded, return 1 so the caller knows we're done
+ * with this page and already handled the IO.
*
- * Here we manually unlock the page, since the caller
- * can't determine if it's an inline extent or a
- * compressed extent.
+ * If there was an error then cow_file_range_inline() has
+ * already done the cleanup.
*/
- unlock_page(locked_page);
- ret = 1;
+ if (ret == 0)
+ ret = 1;
goto done;
- } else if (ret < 0) {
- goto out_unlock;
}
}
@@ -1419,6 +1431,10 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
extent_reserved = true;
ram_size = ins.offset;
+
+ lock_extent(&inode->io_tree, start, start + ram_size - 1,
+ &cached);
+
em = create_io_em(inode, start, ins.offset, /* len */
start, /* orig_start */
ins.objectid, /* block_start */
@@ -1428,6 +1444,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
BTRFS_COMPRESS_NONE, /* compress_type */
BTRFS_ORDERED_REGULAR /* type */);
if (IS_ERR(em)) {
+ unlock_extent(&inode->io_tree, start,
+ start + ram_size - 1, &cached);
ret = PTR_ERR(em);
goto out_reserve;
}
@@ -1438,6 +1456,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
0, 1 << BTRFS_ORDERED_REGULAR,
BTRFS_COMPRESS_NONE);
if (IS_ERR(ordered)) {
+ unlock_extent(&inode->io_tree, start,
+ start + ram_size - 1, &cached);
ret = PTR_ERR(ordered);
goto out_drop_extent_cache;
}
@@ -1477,7 +1497,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
page_ops |= PAGE_SET_ORDERED;
extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
- locked_page,
+ locked_page, &cached,
EXTENT_LOCKED | EXTENT_DELALLOC,
page_ops);
if (num_bytes < cur_alloc_size)
@@ -1536,10 +1556,17 @@ out_unlock:
if (!locked_page)
mapping_set_error(inode->vfs_inode.i_mapping, ret);
extent_clear_unlock_delalloc(inode, orig_start, start - 1,
- locked_page, 0, page_ops);
+ locked_page, NULL, 0, page_ops);
}
/*
+ * At this point we're unlocked, and we want to make sure we're only
+ * clearing these flags under the extent lock, so lock the rest of the
+ * range and clean everything up.
+ */
+ lock_extent(&inode->io_tree, start, end, NULL);
+
+ /*
* For the range (2). If we reserved an extent for our delalloc range
* (or a subrange) and failed to create the respective ordered extent,
* then it means that when we reserved the extent we decremented the
@@ -1552,7 +1579,7 @@ out_unlock:
if (extent_reserved) {
extent_clear_unlock_delalloc(inode, start,
start + cur_alloc_size - 1,
- locked_page,
+ locked_page, &cached,
clear_bits,
page_ops);
start += cur_alloc_size;
@@ -1567,7 +1594,7 @@ out_unlock:
if (start < end) {
clear_bits |= EXTENT_CLEAR_DATA_RESV;
extent_clear_unlock_delalloc(inode, start, end, locked_page,
- clear_bits, page_ops);
+ &cached, clear_bits, page_ops);
}
return ret;
}
@@ -1640,7 +1667,6 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
if (!ctx)
return false;
- unlock_extent(&inode->io_tree, start, end, NULL);
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
async_chunk = ctx->chunks;
@@ -1734,29 +1760,6 @@ static noinline int run_delalloc_cow(struct btrfs_inode *inode,
return 1;
}
-static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
- u64 bytenr, u64 num_bytes, bool nowait)
-{
- struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bytenr);
- struct btrfs_ordered_sum *sums;
- int ret;
- LIST_HEAD(list);
-
- ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1,
- &list, 0, nowait);
- if (ret == 0 && list_empty(&list))
- return 0;
-
- while (!list_empty(&list)) {
- sums = list_entry(list.next, struct btrfs_ordered_sum, list);
- list_del(&sums->list);
- kfree(sums);
- }
- if (ret < 0)
- return ret;
- return 1;
-}
-
static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
const u64 start, const u64 end)
{
@@ -1764,6 +1767,7 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
const u64 range_bytes = end + 1 - start;
struct extent_io_tree *io_tree = &inode->io_tree;
+ struct extent_state *cached_state = NULL;
u64 range_start = start;
u64 count;
int ret;
@@ -1800,6 +1804,7 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
* group that contains that extent to RO mode and therefore force COW
* when starting writeback.
*/
+ lock_extent(io_tree, start, end, &cached_state);
count = count_range_bits(io_tree, &range_start, end, range_bytes,
EXTENT_NORESERVE, 0, NULL);
if (count > 0 || is_space_ino || is_reloc_ino) {
@@ -1818,6 +1823,7 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
NULL);
}
+ unlock_extent(io_tree, start, end, &cached_state);
/*
* Don't try to create inline extents, as a mix of inline extent that
@@ -1871,6 +1877,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_root *root = inode->root;
struct btrfs_file_extent_item *fi;
+ struct btrfs_root *csum_root;
u64 extent_end;
u8 extent_type;
int can_nocow = 0;
@@ -1931,7 +1938,7 @@ static int can_nocow_file_extent(struct btrfs_path *path,
if (args->free_path) {
/*
* We don't need the path anymore, plus through the
- * csum_exist_in_range() call below we will end up allocating
+ * btrfs_lookup_csums_list() call below we will end up allocating
* another path. So free the path to avoid unnecessary extra
* memory usage.
*/
@@ -1952,8 +1959,11 @@ static int can_nocow_file_extent(struct btrfs_path *path,
* Force COW if csums exist in the range. This ensures that csums for a
* given extent are either valid or do not exist.
*/
- ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes,
- nowait);
+
+ csum_root = btrfs_csum_root(root->fs_info, args->disk_bytenr);
+ ret = btrfs_lookup_csums_list(csum_root, args->disk_bytenr,
+ args->disk_bytenr + args->num_bytes - 1,
+ NULL, nowait);
WARN_ON_ONCE(ret > 0 && is_freespace_inode);
if (ret != 0)
goto out;
@@ -2003,12 +2013,13 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
nocow_args.end = end;
nocow_args.writeback_path = true;
- while (1) {
+ while (cur_offset <= end) {
struct btrfs_block_group *nocow_bg = NULL;
struct btrfs_ordered_extent *ordered;
struct btrfs_key found_key;
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
+ struct extent_state *cached_state = NULL;
u64 extent_end;
u64 ram_bytes;
u64 nocow_end;
@@ -2146,6 +2157,8 @@ must_cow:
}
nocow_end = cur_offset + nocow_args.num_bytes - 1;
+ lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state);
+
is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
if (is_prealloc) {
u64 orig_start = found_key.offset - nocow_args.extent_offset;
@@ -2159,6 +2172,8 @@ must_cow:
ram_bytes, BTRFS_COMPRESS_NONE,
BTRFS_ORDERED_PREALLOC);
if (IS_ERR(em)) {
+ unlock_extent(&inode->io_tree, cur_offset,
+ nocow_end, &cached_state);
btrfs_dec_nocow_writers(nocow_bg);
ret = PTR_ERR(em);
goto error;
@@ -2179,6 +2194,8 @@ must_cow:
btrfs_drop_extent_map_range(inode, cur_offset,
nocow_end, false);
}
+ unlock_extent(&inode->io_tree, cur_offset,
+ nocow_end, &cached_state);
ret = PTR_ERR(ordered);
goto error;
}
@@ -2193,8 +2210,8 @@ must_cow:
btrfs_put_ordered_extent(ordered);
extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
- locked_page, EXTENT_LOCKED |
- EXTENT_DELALLOC |
+ locked_page, &cached_state,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
EXTENT_CLEAR_DATA_RESV,
PAGE_UNLOCK | PAGE_SET_ORDERED);
@@ -2207,8 +2224,6 @@ must_cow:
*/
if (ret)
goto error;
- if (cur_offset > end)
- break;
}
btrfs_release_path(path);
@@ -2234,13 +2249,23 @@ error:
*/
if (cow_start != (u64)-1)
cur_offset = cow_start;
- if (cur_offset < end)
+
+ /*
+ * We need to lock the extent here because we're clearing DELALLOC and
+ * we're not locked at this point.
+ */
+ if (cur_offset < end) {
+ struct extent_state *cached = NULL;
+
+ lock_extent(&inode->io_tree, cur_offset, end, &cached);
extent_clear_unlock_delalloc(inode, cur_offset, end,
- locked_page, EXTENT_LOCKED |
- EXTENT_DELALLOC | EXTENT_DEFRAG |
+ locked_page, &cached,
+ EXTENT_LOCKED | EXTENT_DELALLOC |
+ EXTENT_DEFRAG |
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
+ }
btrfs_free_path(path);
return ret;
}
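A recurring theme in this series: callers no longer arrive with the whole delalloc range locked, so every site that clears extent-state bits now takes the extent lock just around the clearing. Condensed from the error path above:

	struct extent_state *cached = NULL;

	lock_extent(&inode->io_tree, cur_offset, end, &cached);
	extent_clear_unlock_delalloc(inode, cur_offset, end, locked_page,
				     &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);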
@@ -2533,7 +2558,7 @@ void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
*/
if (bits & EXTENT_CLEAR_META_RESV &&
root != fs_info->tree_root)
- btrfs_delalloc_release_metadata(inode, len, false);
+ btrfs_delalloc_release_metadata(inode, len, true);
/* For sanity tests. */
if (btrfs_is_testing(fs_info))
@@ -3182,7 +3207,6 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
btrfs_abort_transaction(trans, ret);
goto out;
}
- ret = 0;
out:
clear_extent_bit(&inode->io_tree, start, end, clear_bits,
&cached_state);
@@ -3201,9 +3225,8 @@ out:
* set the mapping error, so we need to set it if we're the ones
* marking this ordered extent as failed.
*/
- if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR,
- &ordered_extent->flags))
- mapping_set_error(ordered_extent->inode->i_mapping, -EIO);
+ if (ret)
+ btrfs_mark_ordered_extent_error(ordered_extent);
if (truncated)
unwritten_start += logical_len;
@@ -3257,7 +3280,7 @@ out:
* Actually free the qgroup rsv which was released when
* the ordered extent was created.
*/
- btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid,
+ btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
ordered_extent->qgroup_rsv,
BTRFS_QGROUP_RSV_DATA);
}
@@ -3924,7 +3947,7 @@ cache_acl:
btrfs_err(fs_info,
"error loading props for ino %llu (root %llu): %d",
btrfs_ino(BTRFS_I(inode)),
- root->root_key.objectid, ret);
+ btrfs_root_id(root), ret);
}
if (path != in_path)
btrfs_free_path(path);
@@ -4283,7 +4306,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
/* This needs to handle no-key deletions later on */
if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
- objectid = inode->root->root_key.objectid;
+ objectid = btrfs_root_id(inode->root);
} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
objectid = inode->location.objectid;
} else {
@@ -4341,7 +4364,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
} else {
ret = btrfs_del_root_ref(trans, objectid,
- root->root_key.objectid, dir_ino,
+ btrfs_root_id(root), dir_ino,
&index, &fname.disk_name);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -4391,7 +4414,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
dir_id, &name, 0);
if (di && !IS_ERR(di)) {
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
- if (key.objectid == root->root_key.objectid) {
+ if (key.objectid == btrfs_root_id(root)) {
ret = -EPERM;
btrfs_err(fs_info,
"deleting default subvolume %llu is not allowed",
@@ -4401,7 +4424,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
btrfs_release_path(path);
}
- key.objectid = root->root_key.objectid;
+ key.objectid = btrfs_root_id(root);
key.type = BTRFS_ROOT_REF_KEY;
key.offset = (u64)-1;
@@ -4421,8 +4444,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
if (path->slots[0] > 0) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
- if (key.objectid == root->root_key.objectid &&
- key.type == BTRFS_ROOT_REF_KEY)
+ if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
ret = -ENOTEMPTY;
}
out:
@@ -4434,64 +4456,26 @@ out:
static void btrfs_prune_dentries(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct rb_node *node;
- struct rb_node *prev;
- struct btrfs_inode *entry;
- struct inode *inode;
- u64 objectid = 0;
+ struct btrfs_inode *inode;
+ u64 min_ino = 0;
if (!BTRFS_FS_ERROR(fs_info))
WARN_ON(btrfs_root_refs(&root->root_item) != 0);
- spin_lock(&root->inode_lock);
-again:
- node = root->inode_tree.rb_node;
- prev = NULL;
- while (node) {
- prev = node;
- entry = rb_entry(node, struct btrfs_inode, rb_node);
-
- if (objectid < btrfs_ino(entry))
- node = node->rb_left;
- else if (objectid > btrfs_ino(entry))
- node = node->rb_right;
- else
- break;
- }
- if (!node) {
- while (prev) {
- entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= btrfs_ino(entry)) {
- node = prev;
- break;
- }
- prev = rb_next(prev);
- }
- }
- while (node) {
- entry = rb_entry(node, struct btrfs_inode, rb_node);
- objectid = btrfs_ino(entry) + 1;
- inode = igrab(&entry->vfs_inode);
- if (inode) {
- spin_unlock(&root->inode_lock);
- if (atomic_read(&inode->i_count) > 1)
- d_prune_aliases(inode);
- /*
- * btrfs_drop_inode will have it removed from the inode
- * cache when its usage count hits zero.
- */
- iput(inode);
- cond_resched();
- spin_lock(&root->inode_lock);
- goto again;
- }
-
- if (cond_resched_lock(&root->inode_lock))
- goto again;
+ inode = btrfs_find_first_inode(root, min_ino);
+ while (inode) {
+ if (atomic_read(&inode->vfs_inode.i_count) > 1)
+ d_prune_aliases(&inode->vfs_inode);
- node = rb_next(node);
+ min_ino = btrfs_ino(inode) + 1;
+ /*
+ * btrfs_drop_inode() will have it removed from the inode
+ * cache when its usage count hits zero.
+ */
+ iput(&inode->vfs_inode);
+ cond_resched();
+ inode = btrfs_find_first_inode(root, min_ino);
}
- spin_unlock(&root->inode_lock);
}
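The rewrite above replaces the open-coded rbtree walk, with its igrab()/unlock/goto-again dance, by iteration over inode numbers. The shape of the new loop, assuming (per the hunk; the helper itself is not shown here) that btrfs_find_first_inode() returns the inode with the smallest ino >= min_ino with a reference held, or NULL when the root is exhausted:

	u64 min_ino = 0;
	struct btrfs_inode *inode;

	while ((inode = btrfs_find_first_inode(root, min_ino)) != NULL) {
		if (atomic_read(&inode->vfs_inode.i_count) > 1)
			d_prune_aliases(&inode->vfs_inode);

		min_ino = btrfs_ino(inode) + 1;
		iput(&inode->vfs_inode);	/* drop the reference we were handed */
		cond_resched();
	}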
int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
@@ -4503,6 +4487,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
struct btrfs_trans_handle *trans;
struct btrfs_block_rsv block_rsv;
u64 root_flags;
+ u64 qgroup_reserved = 0;
int ret;
down_write(&fs_info->subvol_sem);
@@ -4517,7 +4502,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu during send",
- dest->root_key.objectid);
+ btrfs_root_id(dest));
ret = -EPERM;
goto out_up_write;
}
@@ -4525,7 +4510,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
spin_unlock(&dest->root_item_lock);
btrfs_warn(fs_info,
"attempt to delete subvolume %llu with active swapfile",
- root->root_key.objectid);
+ btrfs_root_id(root));
ret = -EPERM;
goto out_up_write;
}
@@ -4547,12 +4532,20 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
if (ret)
goto out_undead;
+ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_release;
}
+ ret = btrfs_record_root_in_trans(trans, root);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_end_trans;
+ }
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
@@ -4578,7 +4571,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
ret = btrfs_insert_orphan_item(trans,
fs_info->tree_root,
- dest->root_key.objectid);
+ btrfs_root_id(dest));
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
@@ -4586,8 +4579,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
}
ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
- BTRFS_UUID_KEY_SUBVOL,
- dest->root_key.objectid);
+ BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
if (ret && ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
@@ -4596,7 +4588,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
ret = btrfs_uuid_tree_remove(trans,
dest->root_item.received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
- dest->root_key.objectid);
+ btrfs_root_id(dest));
if (ret && ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
goto out_end_trans;
@@ -4611,7 +4603,9 @@ out_end_trans:
ret = btrfs_end_transaction(trans);
inode->i_flags |= S_DEAD;
out_release:
- btrfs_subvolume_release_metadata(root, &block_rsv);
+ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
out_undead:
if (ret) {
spin_lock(&dest->root_item_lock);
@@ -4635,7 +4629,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(dentry);
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
- int err = 0;
+ int ret = 0;
struct btrfs_trans_handle *trans;
u64 last_unlink_trans;
struct fscrypt_name fname;
@@ -4651,33 +4645,33 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
}
- err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
- if (err)
- return err;
+ ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
+ if (ret)
+ return ret;
/* This needs to handle no-key deletions later on */
trans = __unlink_start_trans(BTRFS_I(dir));
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
goto out_notrans;
}
if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
- err = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
+ ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
goto out;
}
- err = btrfs_orphan_add(trans, BTRFS_I(inode));
- if (err)
+ ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ if (ret)
goto out;
last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
/* now the directory is empty */
- err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
+ ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
&fname.disk_name);
- if (!err) {
+ if (!ret) {
btrfs_i_size_write(BTRFS_I(inode), 0);
/*
* Propagate the last_unlink_trans value of the deleted dir to
@@ -4699,7 +4693,7 @@ out_notrans:
btrfs_btree_balance_dirty(fs_info);
fscrypt_free_filename(&fname);
- return err;
+ return ret;
}
/*
@@ -4923,16 +4917,16 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
u64 last_byte;
u64 cur_offset;
u64 hole_size;
- int err = 0;
+ int ret = 0;
/*
* If our size started in the middle of a block we need to zero out the
* rest of the block before we expand the i_size, otherwise we could
* expose stale data.
*/
- err = btrfs_truncate_block(inode, oldsize, 0, 0);
- if (err)
- return err;
+ ret = btrfs_truncate_block(inode, oldsize, 0, 0);
+ if (ret)
+ return ret;
if (size <= hole_start)
return 0;
@@ -4943,7 +4937,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
while (1) {
em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
if (IS_ERR(em)) {
- err = PTR_ERR(em);
+ ret = PTR_ERR(em);
em = NULL;
break;
}
@@ -4954,13 +4948,13 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
struct extent_map *hole_em;
- err = maybe_insert_hole(inode, cur_offset, hole_size);
- if (err)
+ ret = maybe_insert_hole(inode, cur_offset, hole_size);
+ if (ret)
break;
- err = btrfs_inode_set_file_extent_range(inode,
+ ret = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
- if (err)
+ if (ret)
break;
hole_em = alloc_extent_map();
@@ -4981,12 +4975,12 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
hole_em->ram_bytes = hole_size;
hole_em->generation = btrfs_get_fs_generation(fs_info);
- err = btrfs_replace_extent_map_range(inode, hole_em, true);
+ ret = btrfs_replace_extent_map_range(inode, hole_em, true);
free_extent_map(hole_em);
} else {
- err = btrfs_inode_set_file_extent_range(inode,
+ ret = btrfs_inode_set_file_extent_range(inode,
cur_offset, hole_size);
- if (err)
+ if (ret)
break;
}
next:
@@ -4998,7 +4992,7 @@ next:
}
free_extent_map(em);
unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
- return err;
+ return ret;
}
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
@@ -5274,7 +5268,7 @@ void btrfs_evict_inode(struct inode *inode)
if (inode->i_nlink &&
((btrfs_root_refs(&root->root_item) != 0 &&
- root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
+ btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
btrfs_is_free_space_inode(BTRFS_I(inode))))
goto out;
@@ -5286,7 +5280,7 @@ void btrfs_evict_inode(struct inode *inode)
if (inode->i_nlink > 0) {
BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
- root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
+ btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
goto out;
}
@@ -5458,7 +5452,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
}
err = -ENOENT;
- key.objectid = dir->root->root_key.objectid;
+ key.objectid = btrfs_root_id(dir->root);
key.type = BTRFS_ROOT_REF_KEY;
key.offset = location->objectid;
@@ -6417,8 +6411,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
if (ret) {
btrfs_err(fs_info,
"error inheriting props for ino %llu (root %llu): %d",
- btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
- ret);
+ btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
}
/*
@@ -6491,7 +6484,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
ret = btrfs_add_root_ref(trans, key.objectid,
- root->root_key.objectid, parent_ino,
+ btrfs_root_id(root), parent_ino,
index, name);
} else if (add_backref) {
ret = btrfs_insert_inode_ref(trans, root, name,
@@ -6534,7 +6527,7 @@ fail_dir_item:
u64 local_index;
int err;
err = btrfs_del_root_ref(trans, key.objectid,
- root->root_key.objectid, parent_ino,
+ btrfs_root_id(root), parent_ino,
&local_index, name);
if (err)
btrfs_abort_transaction(trans, err);
@@ -6632,7 +6625,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
int drop_inode = 0;
/* do not allow sys_link's with other subvols of the same device */
- if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid)
+ if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
return -EXDEV;
if (inode->i_nlink >= BTRFS_LINK_MAX)
@@ -6979,7 +6972,7 @@ insert:
}
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
+ ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
out:
btrfs_free_path(path);
@@ -7306,11 +7299,49 @@ static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
struct extent_map *em;
int ret;
+ /*
+ * Note the missing NOCOW type.
+ *
+ * For pure NOCOW writes, we should not create an io extent map, but
+ * just reuse the existing one.
+ * Only PREALLOC writes (NOCOW write into preallocated range) can
+ * create an io extent map.
+ */
ASSERT(type == BTRFS_ORDERED_PREALLOC ||
type == BTRFS_ORDERED_COMPRESSED ||
- type == BTRFS_ORDERED_NOCOW ||
type == BTRFS_ORDERED_REGULAR);
+ switch (type) {
+ case BTRFS_ORDERED_PREALLOC:
+ /* Uncompressed extents. */
+ ASSERT(block_len == len);
+
+ /* We're only referring to part of a larger preallocated extent. */
+ ASSERT(block_len <= ram_bytes);
+ break;
+ case BTRFS_ORDERED_REGULAR:
+ /* Uncompressed extents. */
+ ASSERT(block_len == len);
+
+ /* COW results in a new extent matching our file extent size. */
+ ASSERT(orig_block_len == len);
+ ASSERT(ram_bytes == len);
+
+ /* Since it's a new extent, we should not have any offset. */
+ ASSERT(orig_start == start);
+ break;
+ case BTRFS_ORDERED_COMPRESSED:
+ /* Must be compressed. */
+ ASSERT(compress_type != BTRFS_COMPRESS_NONE);
+
+ /*
+ * An encoded write can make us refer to part of the
+ * uncompressed extent.
+ */
+ ASSERT(len <= ram_bytes);
+ break;
+ }
+
em = alloc_extent_map();
if (!em)
return ERR_PTR(-ENOMEM);
@@ -7324,9 +7355,7 @@ static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
em->ram_bytes = ram_bytes;
em->generation = -1;
em->flags |= EXTENT_FLAG_PINNED;
- if (type == BTRFS_ORDERED_PREALLOC)
- em->flags |= EXTENT_FLAG_FILLING;
- else if (type == BTRFS_ORDERED_COMPRESSED)
+ if (type == BTRFS_ORDERED_COMPRESSED)
extent_map_set_compression(em, compress_type);
ret = btrfs_replace_extent_map_range(inode, em, true);
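
As a rough standalone sketch of the invariants the new switch enforces (simplified stand-in types, not the kernel's structures):

	/* Sketch: per-type field invariants checked by create_io_em(). */
	enum ordered_type { PREALLOC, REGULAR, COMPRESSED };

	struct io_em_args {
		unsigned long long start, orig_start, len;
		unsigned long long block_len, orig_block_len, ram_bytes;
		int compressed;	/* nonzero for a real compression type */
	};

	static int io_em_args_valid(enum ordered_type type, const struct io_em_args *a)
	{
		switch (type) {
		case PREALLOC:	/* may cover part of a larger preallocated extent */
			return a->block_len == a->len && a->block_len <= a->ram_bytes;
		case REGULAR:	/* fresh COW extent exactly matching the write */
			return a->block_len == a->len && a->orig_block_len == a->len &&
			       a->ram_bytes == a->len && a->orig_start == a->start;
		case COMPRESSED:	/* may refer to part of the uncompressed data */
			return a->compressed && a->len <= a->ram_bytes;
		}
		return 0;	/* NOCOW never creates an io extent map */
	}
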
@@ -7913,17 +7942,6 @@ static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return ret;
}
-static int btrfs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- return extent_writepages(mapping, wbc);
-}
-
-static void btrfs_readahead(struct readahead_control *rac)
-{
- extent_readahead(rac);
-}
-
/*
* For release_folio() and invalidate_folio() we have a race window where
* folio_end_writeback() is called but the subpage spinlock is not yet released.
@@ -7960,13 +7978,12 @@ static void wait_subpage_spinlock(struct page *page)
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
- int ret = try_release_extent_mapping(&folio->page, gfp_flags);
-
- if (ret == 1) {
+ if (try_release_extent_mapping(&folio->page, gfp_flags)) {
wait_subpage_spinlock(&folio->page);
clear_page_extent_mapped(&folio->page);
+ return true;
}
- return ret;
+ return false;
}
static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
@@ -8164,173 +8181,6 @@ next:
clear_page_extent_mapped(&folio->page);
}
-/*
- * btrfs_page_mkwrite() is not allowed to change the file size as it gets
- * called from a page fault handler when a page is first dirtied. Hence we must
- * be careful to check for EOF conditions here. We set the page up correctly
- * for a written page which means we get ENOSPC checking when writing into
- * holes and correct delalloc and unwritten extent mapping on filesystems that
- * support these features.
- *
- * We are not allowed to take the i_mutex here so we have to play games to
- * protect against truncate races as the page could now be beyond EOF. Because
- * truncate_setsize() writes the inode size before removing pages, once we have
- * the page lock we can determine safely if the page is beyond EOF. If it is not
- * beyond EOF, then the page is guaranteed safe against truncation until we
- * unlock the page.
- */
-vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf)
-{
- struct page *page = vmf->page;
- struct folio *folio = page_folio(page);
- struct inode *inode = file_inode(vmf->vma->vm_file);
- struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_ordered_extent *ordered;
- struct extent_state *cached_state = NULL;
- struct extent_changeset *data_reserved = NULL;
- unsigned long zero_start;
- loff_t size;
- vm_fault_t ret;
- int ret2;
- int reserved = 0;
- u64 reserved_space;
- u64 page_start;
- u64 page_end;
- u64 end;
-
- ASSERT(folio_order(folio) == 0);
-
- reserved_space = PAGE_SIZE;
-
- sb_start_pagefault(inode->i_sb);
- page_start = page_offset(page);
- page_end = page_start + PAGE_SIZE - 1;
- end = page_end;
-
- /*
- * Reserving delalloc space after obtaining the page lock can lead to
- * deadlock. For example, if a dirty page is locked by this function
- * and the call to btrfs_delalloc_reserve_space() ends up triggering
- * dirty page write out, then the btrfs_writepages() function could
- * end up waiting indefinitely to get a lock on the page currently
- * being processed by btrfs_page_mkwrite() function.
- */
- ret2 = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved,
- page_start, reserved_space);
- if (!ret2) {
- ret2 = file_update_time(vmf->vma->vm_file);
- reserved = 1;
- }
- if (ret2) {
- ret = vmf_error(ret2);
- if (reserved)
- goto out;
- goto out_noreserve;
- }
-
- ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
-again:
- down_read(&BTRFS_I(inode)->i_mmap_lock);
- lock_page(page);
- size = i_size_read(inode);
-
- if ((page->mapping != inode->i_mapping) ||
- (page_start >= size)) {
- /* page got truncated out from underneath us */
- goto out_unlock;
- }
- wait_on_page_writeback(page);
-
- lock_extent(io_tree, page_start, page_end, &cached_state);
- ret2 = set_page_extent_mapped(page);
- if (ret2 < 0) {
- ret = vmf_error(ret2);
- unlock_extent(io_tree, page_start, page_end, &cached_state);
- goto out_unlock;
- }
-
- /*
- * we can't set the delalloc bits if there are pending ordered
- * extents. Drop our locks and wait for them to finish
- */
- ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
- PAGE_SIZE);
- if (ordered) {
- unlock_extent(io_tree, page_start, page_end, &cached_state);
- unlock_page(page);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
- btrfs_start_ordered_extent(ordered);
- btrfs_put_ordered_extent(ordered);
- goto again;
- }
-
- if (page->index == ((size - 1) >> PAGE_SHIFT)) {
- reserved_space = round_up(size - page_start,
- fs_info->sectorsize);
- if (reserved_space < PAGE_SIZE) {
- end = page_start + reserved_space - 1;
- btrfs_delalloc_release_space(BTRFS_I(inode),
- data_reserved, page_start,
- PAGE_SIZE - reserved_space, true);
- }
- }
-
- /*
- * page_mkwrite gets called when the page is firstly dirtied after it's
- * faulted in, but write(2) could also dirty a page and set delalloc
- * bits, thus in this case for space account reason, we still need to
- * clear any delalloc bits within this page range since we have to
- * reserve data&meta space before lock_page() (see above comments).
- */
- clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
- EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, &cached_state);
-
- ret2 = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, end, 0,
- &cached_state);
- if (ret2) {
- unlock_extent(io_tree, page_start, page_end, &cached_state);
- ret = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
-
- /* page is wholly or partially inside EOF */
- if (page_start + PAGE_SIZE > size)
- zero_start = offset_in_page(size);
- else
- zero_start = PAGE_SIZE;
-
- if (zero_start != PAGE_SIZE)
- memzero_page(page, zero_start, PAGE_SIZE - zero_start);
-
- btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
- btrfs_folio_set_dirty(fs_info, folio, page_start, end + 1 - page_start);
- btrfs_folio_set_uptodate(fs_info, folio, page_start, end + 1 - page_start);
-
- btrfs_set_inode_last_sub_trans(BTRFS_I(inode));
-
- unlock_extent(io_tree, page_start, page_end, &cached_state);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
-
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
- sb_end_pagefault(inode->i_sb);
- extent_changeset_free(data_reserved);
- return VM_FAULT_LOCKED;
-
-out_unlock:
- unlock_page(page);
- up_read(&BTRFS_I(inode)->i_mmap_lock);
-out:
- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
- btrfs_delalloc_release_space(BTRFS_I(inode), data_reserved, page_start,
- reserved_space, (ret != 0));
-out_noreserve:
- sb_end_pagefault(inode->i_sb);
- extent_changeset_free(data_reserved);
- return ret;
-}
-
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
struct btrfs_truncate_control control = {
@@ -8779,6 +8629,9 @@ static int btrfs_getattr(struct mnt_idmap *idmap,
generic_fillattr(idmap, request_mask, inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
+ stat->subvol = BTRFS_I(inode)->root->root_key.objectid;
+ stat->result_mask |= STATX_SUBVOL;
+
spin_lock(&BTRFS_I(inode)->lock);
delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
inode_bytes = inode_get_bytes(inode);
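
For context, a userspace sketch of consuming the new field, assuming the matching UAPI additions (the STATX_SUBVOL mask bit and the stx_subvol field) from the same series are present in linux/stat.h:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/stat.h>

	static int print_subvol(const char *path)
	{
		struct statx stx;

		if (syscall(SYS_statx, AT_FDCWD, path, 0, STATX_SUBVOL, &stx))
			return -1;
		/* Only trust stx_subvol if the filesystem set the bit. */
		if (stx.stx_mask & STATX_SUBVOL)
			printf("%s: subvol %llu\n", path,
			       (unsigned long long)stx.stx_subvol);
		return 0;
	}
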
@@ -9658,7 +9511,7 @@ free_qgroup:
* or we leak qgroup data reservation.
*/
btrfs_qgroup_free_refroot(inode->root->fs_info,
- inode->root->root_key.objectid, qgroup_released,
+ btrfs_root_id(inode->root), qgroup_released,
BTRFS_QGROUP_RSV_DATA);
return ERR_PTR(ret);
}
@@ -10306,8 +10159,8 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
size_t orig_count;
u64 start, end;
u64 num_bytes, ram_bytes, disk_num_bytes;
- unsigned long nr_pages, i;
- struct page **pages;
+ unsigned long nr_folios, i;
+ struct folio **folios;
struct btrfs_key ins;
bool extent_reserved = false;
struct extent_map *em;
@@ -10396,24 +10249,24 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
* isn't.
*/
disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
- nr_pages = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
- pages = kvcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL_ACCOUNT);
- if (!pages)
+ nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
+ folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
+ if (!folios)
return -ENOMEM;
- for (i = 0; i < nr_pages; i++) {
+ for (i = 0; i < nr_folios; i++) {
size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
char *kaddr;
- pages[i] = alloc_page(GFP_KERNEL_ACCOUNT);
- if (!pages[i]) {
+ folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
+ if (!folios[i]) {
ret = -ENOMEM;
- goto out_pages;
+ goto out_folios;
}
- kaddr = kmap_local_page(pages[i]);
+ kaddr = kmap_local_folio(folios[i], 0);
if (copy_from_iter(kaddr, bytes, from) != bytes) {
kunmap_local(kaddr);
ret = -EFAULT;
- goto out_pages;
+ goto out_folios;
}
if (bytes < PAGE_SIZE)
memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
@@ -10425,12 +10278,12 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes);
if (ret)
- goto out_pages;
+ goto out_folios;
ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
start >> PAGE_SHIFT,
end >> PAGE_SHIFT);
if (ret)
- goto out_pages;
+ goto out_folios;
lock_extent(io_tree, start, end, &cached_state);
ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
if (!ordered &&
@@ -10458,10 +10311,12 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
goto out_qgroup_free_data;
/* Try an inline extent first. */
- if (start == 0 && encoded->unencoded_len == encoded->len &&
- encoded->unencoded_offset == 0) {
- ret = cow_file_range_inline(inode, encoded->len, orig_count,
- compression, pages, true);
+ if (encoded->unencoded_len == encoded->len &&
+ encoded->unencoded_offset == 0 &&
+ can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
+ ret = __cow_file_range_inline(inode, start, encoded->len,
+ orig_count, compression, folios[0],
+ true);
if (ret <= 0) {
if (ret == 0)
ret = orig_count;
@@ -10505,7 +10360,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
btrfs_delalloc_release_extents(inode, num_bytes);
- btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false);
+ btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
ret = orig_count;
goto out;
@@ -10527,12 +10382,12 @@ out_free_data_space:
btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
out_unlock:
unlock_extent(io_tree, start, end, &cached_state);
-out_pages:
- for (i = 0; i < nr_pages; i++) {
- if (pages[i])
- __free_page(pages[i]);
+out_folios:
+ for (i = 0; i < nr_folios; i++) {
+ if (folios[i])
+ __folio_put(folios[i]);
}
- kvfree(pages);
+ kvfree(folios);
out:
if (ret >= 0)
iocb->ki_pos += encoded->len;
@@ -10759,7 +10614,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
btrfs_exclop_finish(fs_info);
btrfs_warn(fs_info,
"cannot activate swapfile because subvolume %llu is being deleted",
- root->root_key.objectid);
+ btrfs_root_id(root));
return -EPERM;
}
atomic_inc(&root->nr_swapfiles);
@@ -10985,7 +10840,7 @@ void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 en
if (ordered) {
btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
- start, end, btrfs_ino(inode), root->root_key.objectid,
+ start, end, btrfs_ino(inode), btrfs_root_id(root),
ordered->file_offset,
ordered->file_offset + ordered->num_bytes - 1);
btrfs_put_ordered_extent(ordered);
@@ -10994,6 +10849,65 @@ void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 en
ASSERT(ordered == NULL);
}
+/*
+ * Find the first inode with a minimum number.
+ *
+ * @root: The root to search in.
+ * @min_ino: The minimum inode number.
+ *
+ * Find the first inode in @root with a number >= @min_ino and return it.
+ * Returns NULL if no such inode is found.
+ */
+struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
+{
+ struct rb_node *node;
+ struct rb_node *prev;
+ struct btrfs_inode *inode;
+
+ spin_lock(&root->inode_lock);
+again:
+ node = root->inode_tree.rb_node;
+ prev = NULL;
+ while (node) {
+ prev = node;
+ inode = rb_entry(node, struct btrfs_inode, rb_node);
+ if (min_ino < btrfs_ino(inode))
+ node = node->rb_left;
+ else if (min_ino > btrfs_ino(inode))
+ node = node->rb_right;
+ else
+ break;
+ }
+
+ if (!node) {
+ while (prev) {
+ inode = rb_entry(prev, struct btrfs_inode, rb_node);
+ if (min_ino <= btrfs_ino(inode)) {
+ node = prev;
+ break;
+ }
+ prev = rb_next(prev);
+ }
+ }
+
+ while (node) {
+ inode = rb_entry(node, struct btrfs_inode, rb_node);
+ if (igrab(&inode->vfs_inode)) {
+ spin_unlock(&root->inode_lock);
+ return inode;
+ }
+
+ min_ino = btrfs_ino(inode) + 1;
+ if (cond_resched_lock(&root->inode_lock))
+ goto again;
+
+ node = rb_next(node);
+ }
+ spin_unlock(&root->inode_lock);
+
+ return NULL;
+}
+
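
A sketch of the intended call pattern for the helper above (hypothetical caller; the helper returns an igrab'ed inode, so the caller must drop the reference and advance min_ino itself):

	struct btrfs_inode *inode;
	u64 min_ino = 0;

	while ((inode = btrfs_find_first_inode(root, min_ino))) {
		min_ino = btrfs_ino(inode) + 1;
		/* ... work on inode ... */
		iput(&inode->vfs_inode);
	}
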
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 294e31edec9d..efd5d6e9589e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -613,6 +613,7 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
int ret;
dev_t anon_dev;
u64 objectid;
+ u64 qgroup_reserved = 0;
root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
if (!root_item)
@@ -650,19 +651,24 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
trans_num_items, false);
if (ret)
goto out_new_inode_args;
+ qgroup_reserved = block_rsv.qgroup_rsv_reserved;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
- btrfs_subvolume_release_metadata(root, &block_rsv);
- goto out_new_inode_args;
+ goto out_release_rsv;
}
+ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
+ if (ret)
+ goto out;
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
trans->block_rsv = &block_rsv;
trans->bytes_reserved = block_rsv.size;
/* Tree log can't currently deal with an inode which is a new root. */
btrfs_set_log_full_commit(trans);
- ret = btrfs_qgroup_inherit(trans, 0, objectid, root->root_key.objectid, inherit);
+ ret = btrfs_qgroup_inherit(trans, 0, objectid, btrfs_root_id(root), inherit);
if (ret)
goto out;
@@ -767,9 +773,11 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
out:
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
- btrfs_subvolume_release_metadata(root, &block_rsv);
-
btrfs_end_transaction(trans);
+out_release_rsv:
+ btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
out_new_inode_args:
btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
@@ -791,6 +799,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
struct btrfs_pending_snapshot *pending_snapshot;
unsigned int trans_num_items;
struct btrfs_trans_handle *trans;
+ struct btrfs_block_rsv *block_rsv;
+ u64 qgroup_reserved = 0;
int ret;
/* We do not support snapshotting right now. */
@@ -827,19 +837,19 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto free_pending;
}
- btrfs_init_block_rsv(&pending_snapshot->block_rsv,
- BTRFS_BLOCK_RSV_TEMP);
+ block_rsv = &pending_snapshot->block_rsv;
+ btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
* 1 to add dir item
* 1 to add dir index
* 1 to update parent inode item
*/
trans_num_items = create_subvol_num_items(inherit) + 3;
- ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
- &pending_snapshot->block_rsv,
+ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv,
trans_num_items, false);
if (ret)
goto free_pending;
+ qgroup_reserved = block_rsv->qgroup_rsv_reserved;
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
@@ -852,6 +862,13 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
ret = PTR_ERR(trans);
goto fail;
}
+ ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root);
+ if (ret) {
+ btrfs_end_transaction(trans);
+ goto fail;
+ }
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
+ qgroup_reserved = 0;
trans->pending_snapshot = pending_snapshot;
@@ -881,7 +898,9 @@ fail:
if (ret && pending_snapshot->snap)
pending_snapshot->snap->anon_dev = 0;
btrfs_put_root(pending_snapshot->snap);
- btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
+ btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL);
+ if (qgroup_reserved)
+ btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
free_pending:
if (pending_snapshot->anon_dev)
free_anon_bdev(pending_snapshot->anon_dev);
@@ -1491,7 +1510,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
spin_unlock(&root->root_item_lock);
btrfs_warn(fs_info,
"Attempt to set subvolume %llu read-write during send",
- root->root_key.objectid);
+ btrfs_root_id(root));
ret = -EPERM;
goto out_drop_sem;
}
@@ -1900,7 +1919,7 @@ static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap,
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
struct super_block *sb = inode->i_sb;
struct btrfs_key upper_limit = BTRFS_I(inode)->location;
- u64 treeid = BTRFS_I(inode)->root->root_key.objectid;
+ u64 treeid = btrfs_root_id(BTRFS_I(inode)->root);
u64 dirid = args->dirid;
unsigned long item_off;
unsigned long item_len;
@@ -2072,7 +2091,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct btrfs_root *root,
* path is reset so it's consistent with btrfs_search_path_in_tree.
*/
if (args->treeid == 0)
- args->treeid = root->root_key.objectid;
+ args->treeid = btrfs_root_id(root);
if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
args->name[0] = 0;
@@ -2168,7 +2187,7 @@ static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp)
fs_info = BTRFS_I(inode)->root->fs_info;
/* Get root_item of inode's subvolume */
- key.objectid = BTRFS_I(inode)->root->root_key.objectid;
+ key.objectid = btrfs_root_id(BTRFS_I(inode)->root);
root = btrfs_get_fs_root(fs_info, key.objectid, true);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
@@ -2283,7 +2302,7 @@ static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root,
return PTR_ERR(rootrefs);
}
- objectid = root->root_key.objectid;
+ objectid = btrfs_root_id(root);
key.objectid = objectid;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = rootrefs->min_treeid;
@@ -2367,7 +2386,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
struct mnt_idmap *idmap = file_mnt_idmap(file);
char *subvol_name, *subvol_name_ptr = NULL;
int subvol_namelen;
- int err = 0;
+ int ret = 0;
bool destroy_parent = false;
/* We don't support snapshots with extent tree v2 yet. */
@@ -2383,7 +2402,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
return PTR_ERR(vol_args2);
if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) {
- err = -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
goto out;
}
@@ -2392,31 +2411,31 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* name, same as v1 currently does.
*/
if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) {
- err = btrfs_check_ioctl_vol_args2_subvol_name(vol_args2);
- if (err < 0)
+ ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args2);
+ if (ret < 0)
goto out;
subvol_name = vol_args2->name;
- err = mnt_want_write_file(file);
- if (err)
+ ret = mnt_want_write_file(file);
+ if (ret)
goto out;
} else {
struct inode *old_dir;
if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) {
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
- err = mnt_want_write_file(file);
- if (err)
+ ret = mnt_want_write_file(file);
+ if (ret)
goto out;
dentry = btrfs_get_dentry(fs_info->sb,
BTRFS_FIRST_FREE_OBJECTID,
vol_args2->subvolid, 0);
if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
+ ret = PTR_ERR(dentry);
goto out_drop_write;
}
@@ -2436,7 +2455,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
*/
dput(dentry);
if (IS_ERR(parent)) {
- err = PTR_ERR(parent);
+ ret = PTR_ERR(parent);
goto out_drop_write;
}
old_dir = dir;
@@ -2460,14 +2479,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* to delete without an idmapped mount.
*/
if (old_dir != dir && idmap != &nop_mnt_idmap) {
- err = -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
goto free_parent;
}
subvol_name_ptr = btrfs_get_subvol_name_from_objectid(
fs_info, vol_args2->subvolid);
if (IS_ERR(subvol_name_ptr)) {
- err = PTR_ERR(subvol_name_ptr);
+ ret = PTR_ERR(subvol_name_ptr);
goto free_parent;
}
/* subvol_name_ptr is already nul terminated */
@@ -2478,14 +2497,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
- err = btrfs_check_ioctl_vol_args_path(vol_args);
- if (err < 0)
+ ret = btrfs_check_ioctl_vol_args_path(vol_args);
+ if (ret < 0)
goto out;
subvol_name = vol_args->name;
- err = mnt_want_write_file(file);
- if (err)
+ ret = mnt_want_write_file(file);
+ if (ret)
goto out;
}
@@ -2493,26 +2512,26 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
if (strchr(subvol_name, '/') ||
strncmp(subvol_name, "..", subvol_namelen) == 0) {
- err = -EINVAL;
+ ret = -EINVAL;
goto free_subvol_name;
}
if (!S_ISDIR(dir->i_mode)) {
- err = -ENOTDIR;
+ ret = -ENOTDIR;
goto free_subvol_name;
}
- err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
- if (err == -EINTR)
+ ret = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
+ if (ret == -EINTR)
goto free_subvol_name;
dentry = lookup_one(idmap, subvol_name, parent, subvol_namelen);
if (IS_ERR(dentry)) {
- err = PTR_ERR(dentry);
+ ret = PTR_ERR(dentry);
goto out_unlock_dir;
}
if (d_really_is_negative(dentry)) {
- err = -ENOENT;
+ ret = -ENOENT;
goto out_dput;
}
@@ -2532,7 +2551,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* Users who want to delete empty subvols should try
* rmdir(2).
*/
- err = -EPERM;
+ ret = -EPERM;
if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED))
goto out_dput;
@@ -2543,29 +2562,29 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* of the subvol, not a random directory contained
* within it.
*/
- err = -EINVAL;
+ ret = -EINVAL;
if (root == dest)
goto out_dput;
- err = inode_permission(idmap, inode, MAY_WRITE | MAY_EXEC);
- if (err)
+ ret = inode_permission(idmap, inode, MAY_WRITE | MAY_EXEC);
+ if (ret)
goto out_dput;
}
/* check if subvolume may be deleted by a user */
- err = btrfs_may_delete(idmap, dir, dentry, 1);
- if (err)
+ ret = btrfs_may_delete(idmap, dir, dentry, 1);
+ if (ret)
goto out_dput;
if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
- err = -EINVAL;
+ ret = -EINVAL;
goto out_dput;
}
btrfs_inode_lock(BTRFS_I(inode), 0);
- err = btrfs_delete_subvolume(BTRFS_I(dir), dentry);
+ ret = btrfs_delete_subvolume(BTRFS_I(dir), dentry);
btrfs_inode_unlock(BTRFS_I(inode), 0);
- if (!err)
+ if (!ret)
d_delete_notify(dir, dentry);
out_dput:
@@ -2582,7 +2601,7 @@ out_drop_write:
out:
kfree(vol_args2);
kfree(vol_args);
- return err;
+ return ret;
}
static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
@@ -2962,7 +2981,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
ret = PTR_ERR(new_root);
goto out;
}
- if (!is_fstree(new_root->root_key.objectid)) {
+ if (!is_fstree(btrfs_root_id(new_root))) {
ret = -ENOENT;
goto out_free;
}
@@ -3739,15 +3758,43 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
goto drop_write;
}
- down_write(&fs_info->subvol_sem);
-
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA:
+ down_write(&fs_info->subvol_sem);
ret = btrfs_quota_enable(fs_info, sa);
+ up_write(&fs_info->subvol_sem);
break;
case BTRFS_QUOTA_CTL_DISABLE:
+ /*
+ * Lock the cleaner mutex to prevent races with concurrent
+ * relocation, because relocation may be building backrefs for
+ * blocks of the quota root while we are deleting the root. This
+ * is like dropping fs roots of deleted snapshots/subvolumes; we
+ * need the same protection.
+ *
+ * This also prevents races between concurrent tasks trying to
+ * disable quotas, because we will unlock and relock
+ * qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+ *
+ * We take this here because we have the dependency of
+ *
+ * inode_lock -> subvol_sem
+ *
+ * because of rename. With relocation we can prealloc extents,
+ * so that makes the dependency chain
+ *
+ * cleaner_mutex -> inode_lock -> subvol_sem
+ *
+ * so we must take the cleaner_mutex here before we take the
+ * subvol_sem. The deadlock can't actually happen, but this
+ * quiets lockdep.
+ */
+ mutex_lock(&fs_info->cleaner_mutex);
+ down_write(&fs_info->subvol_sem);
ret = btrfs_quota_disable(fs_info);
+ up_write(&fs_info->subvol_sem);
+ mutex_unlock(&fs_info->cleaner_mutex);
break;
default:
ret = -EINVAL;
@@ -3755,7 +3802,6 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
}
kfree(sa);
- up_write(&fs_info->subvol_sem);
drop_write:
mnt_drop_write_file(file);
return ret;
@@ -3901,7 +3947,7 @@ static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg)
qgroupid = sa->qgroupid;
if (!qgroupid) {
/* take the current subvol as qgroup */
- qgroupid = root->root_key.objectid;
+ qgroupid = btrfs_root_id(root);
}
ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim);
@@ -4032,7 +4078,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
!btrfs_is_empty_uuid(root_item->received_uuid)) {
ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
- root->root_key.objectid);
+ btrfs_root_id(root));
if (ret && ret != -ENOENT) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
@@ -4056,7 +4102,7 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
ret = btrfs_uuid_tree_add(trans, sa->uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
- root->root_key.objectid);
+ btrfs_root_id(root));
if (ret < 0 && ret != -EEXIST) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 99ccab86bb86..6a0b7abb5bd9 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -97,7 +97,7 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb)
{
if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state))
- btrfs_set_buffer_lockdep_class(root->root_key.objectid,
+ btrfs_set_buffer_lockdep_class(btrfs_root_id(root),
eb, btrfs_header_level(eb));
}
@@ -129,14 +129,14 @@ static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { }
*/
/*
- * __btrfs_tree_read_lock - lock extent buffer for read
+ * btrfs_tree_read_lock_nested - lock extent buffer for read
* @eb: the eb to be locked
* @nest: the nesting level to be used for lockdep
*
* This takes the read lock on the extent buffer, using the specified nesting
* level for lockdep purposes.
*/
-void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
+void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
{
u64 start_ns = 0;
@@ -147,11 +147,6 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne
trace_btrfs_tree_read_lock(eb, start_ns);
}
-void btrfs_tree_read_lock(struct extent_buffer *eb)
-{
- __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL);
-}
-
/*
* Try-lock for read.
*
@@ -198,7 +193,7 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
*
* Returns with the eb->lock write locked.
*/
-void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
+void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
__acquires(&eb->lock)
{
u64 start_ns = 0;
@@ -211,11 +206,6 @@ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
trace_btrfs_tree_lock(eb, start_ns);
}
-void btrfs_tree_lock(struct extent_buffer *eb)
-{
- __btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
-}
-
/*
* Release the write lock.
*/
@@ -374,8 +364,12 @@ void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
- atomic_dec(&lock->writers);
- cond_wake_up(&lock->pending_readers);
+ /*
+ * atomic_dec_and_test() implies a full barrier, so woken up readers are
+ * guaranteed to see the decrement.
+ */
+ if (atomic_dec_and_test(&lock->writers))
+ wake_up(&lock->pending_readers);
}
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
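
A userspace analogue of the new unlock path, as a sketch only: an atomic fetch-and-sub returns the previous value, so exactly one writer (the last) issues the wakeup, and the ordering of the RMW guarantees woken readers observe the zero count.

	#include <stdatomic.h>

	static atomic_int writers;

	static void drew_write_unlock_sketch(void (*wake_readers)(void))
	{
		/* Previous value 1 means we were the last writer. */
		if (atomic_fetch_sub_explicit(&writers, 1,
					      memory_order_seq_cst) == 1)
			wake_readers();
	}
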
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index 9576f485a300..1bc8e6738879 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -163,12 +163,22 @@ enum btrfs_lockdep_trans_states {
static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
"too many lock subclasses defined");
-void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
-void btrfs_tree_lock(struct extent_buffer *eb);
+void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
+
+static inline void btrfs_tree_lock(struct extent_buffer *eb)
+{
+ btrfs_tree_lock_nested(eb, BTRFS_NESTING_NORMAL);
+}
+
void btrfs_tree_unlock(struct extent_buffer *eb);
-void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
-void btrfs_tree_read_lock(struct extent_buffer *eb);
+void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
+
+static inline void btrfs_tree_read_lock(struct extent_buffer *eb)
+{
+ btrfs_tree_read_lock_nested(eb, BTRFS_NESTING_NORMAL);
+}
+
void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
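
The header change follows a common kernel refactoring: one out-of-line implementation parameterized by the lockdep nesting level, with the default-nesting case reduced to a static inline so existing callers keep their simple signature. A generic sketch of the pattern, using hypothetical names:

	void foo_lock_nested(struct foo *f, unsigned int subclass); /* out of line */

	static inline void foo_lock(struct foo *f)
	{
		foo_lock_nested(f, 0);	/* common case, no extra export */
	}
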
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 3e5d3b7028e8..1c396ac167aa 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -130,17 +130,17 @@ static inline size_t read_compress_length(const char *buf)
*/
static int copy_compressed_data_to_page(char *compressed_data,
size_t compressed_size,
- struct page **out_pages,
- unsigned long max_nr_page,
+ struct folio **out_folios,
+ unsigned long max_nr_folio,
u32 *cur_out,
const u32 sectorsize)
{
u32 sector_bytes_left;
u32 orig_out;
- struct page *cur_page;
+ struct folio *cur_folio;
char *kaddr;
- if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+ if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
return -E2BIG;
/*
@@ -149,16 +149,16 @@ static int copy_compressed_data_to_page(char *compressed_data,
*/
ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);
- cur_page = out_pages[*cur_out / PAGE_SIZE];
+ cur_folio = out_folios[*cur_out / PAGE_SIZE];
/* Allocate a new page */
- if (!cur_page) {
- cur_page = btrfs_alloc_compr_page();
- if (!cur_page)
+ if (!cur_folio) {
+ cur_folio = btrfs_alloc_compr_folio();
+ if (!cur_folio)
return -ENOMEM;
- out_pages[*cur_out / PAGE_SIZE] = cur_page;
+ out_folios[*cur_out / PAGE_SIZE] = cur_folio;
}
- kaddr = kmap_local_page(cur_page);
+ kaddr = kmap_local_folio(cur_folio, 0);
write_compress_length(kaddr + offset_in_page(*cur_out),
compressed_size);
*cur_out += LZO_LEN;
@@ -172,18 +172,18 @@ static int copy_compressed_data_to_page(char *compressed_data,
kunmap_local(kaddr);
- if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+ if ((*cur_out / PAGE_SIZE) >= max_nr_folio)
return -E2BIG;
- cur_page = out_pages[*cur_out / PAGE_SIZE];
+ cur_folio = out_folios[*cur_out / PAGE_SIZE];
/* Allocate a new page */
- if (!cur_page) {
- cur_page = btrfs_alloc_compr_page();
- if (!cur_page)
+ if (!cur_folio) {
+ cur_folio = btrfs_alloc_compr_folio();
+ if (!cur_folio)
return -ENOMEM;
- out_pages[*cur_out / PAGE_SIZE] = cur_page;
+ out_folios[*cur_out / PAGE_SIZE] = cur_folio;
}
- kaddr = kmap_local_page(cur_page);
+ kaddr = kmap_local_folio(cur_folio, 0);
memcpy(kaddr + offset_in_page(*cur_out),
compressed_data + *cur_out - orig_out, copy_len);
@@ -209,15 +209,15 @@ out:
return 0;
}
-int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
- unsigned long *total_in, unsigned long *total_out)
+int lzo_compress_folios(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
const u32 sectorsize = inode_to_fs_info(mapping->host)->sectorsize;
- struct page *page_in = NULL;
+ struct folio *folio_in = NULL;
char *sizes_ptr;
- const unsigned long max_nr_page = *out_pages;
+ const unsigned long max_nr_folio = *out_folios;
int ret = 0;
/* Points to the file offset of input data */
u64 cur_in = start;
@@ -225,8 +225,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
u32 cur_out = 0;
u32 len = *total_out;
- ASSERT(max_nr_page > 0);
- *out_pages = 0;
+ ASSERT(max_nr_folio > 0);
+ *out_folios = 0;
*total_out = 0;
*total_in = 0;
@@ -243,15 +243,16 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
size_t out_len;
/* Get the input page first */
- if (!page_in) {
- page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
- ASSERT(page_in);
+ if (!folio_in) {
+ ret = btrfs_compress_filemap_get_folio(mapping, cur_in, &folio_in);
+ if (ret < 0)
+ goto out;
}
/* Compress at most one sector of data each time */
in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
ASSERT(in_len);
- data_in = kmap_local_page(page_in);
+ data_in = kmap_local_folio(folio_in, 0);
ret = lzo1x_1_compress(data_in +
offset_in_page(cur_in), in_len,
workspace->cbuf, &out_len,
@@ -264,7 +265,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
}
ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
- pages, max_nr_page,
+ folios, max_nr_folio,
&cur_out, sectorsize);
if (ret < 0)
goto out;
@@ -282,13 +283,13 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
/* Check if we have reached page boundary */
if (PAGE_ALIGNED(cur_in)) {
- put_page(page_in);
- page_in = NULL;
+ folio_put(folio_in);
+ folio_in = NULL;
}
}
/* Store the size of all chunks of compressed data */
- sizes_ptr = kmap_local_page(pages[0]);
+ sizes_ptr = kmap_local_folio(folios[0], 0);
write_compress_length(sizes_ptr, cur_out);
kunmap_local(sizes_ptr);
@@ -296,9 +297,9 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = cur_out;
*total_in = cur_in - start;
out:
- if (page_in)
- put_page(page_in);
- *out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
+ if (folio_in)
+ folio_put(folio_in);
+ *out_folios = DIV_ROUND_UP(cur_out, PAGE_SIZE);
return ret;
}
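
For reference, a sketch of the layout these helpers maintain; per the ASSERTs above, a 4-byte (LZO_LEN) little-endian length header never straddles a sector boundary:

	/*
	 * folio 0: [total_len:4][seg_len:4][seg data ...][seg_len:4][data ...]
	 *          a header that would cross a sector boundary is pushed to
	 *          the start of the next sector, leaving padding behind.
	 */
	static inline unsigned int read_le32_len(const unsigned char *p)
	{
		/* Same byte order as write_compress_length() (le32). */
		return p[0] | (p[1] << 8) | ((unsigned int)p[2] << 16) |
		       ((unsigned int)p[3] << 24);
	}
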
@@ -313,15 +314,15 @@ static void copy_compressed_segment(struct compressed_bio *cb,
u32 orig_in = *cur_in;
while (*cur_in < orig_in + len) {
- struct page *cur_page;
+ struct folio *cur_folio;
u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
orig_in + len - *cur_in);
ASSERT(copy_len);
- cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];
+ cur_folio = cb->compressed_folios[*cur_in / PAGE_SIZE];
- memcpy_from_page(dest + *cur_in - orig_in, cur_page,
- offset_in_page(*cur_in), copy_len);
+ memcpy_from_folio(dest + *cur_in - orig_in, cur_folio,
+ offset_in_folio(cur_folio, *cur_in), copy_len);
*cur_in += copy_len;
}
@@ -341,7 +342,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* Bytes decompressed so far */
u32 cur_out = 0;
- kaddr = kmap_local_page(cb->compressed_pages[0]);
+ kaddr = kmap_local_folio(cb->compressed_folios[0], 0);
len_in = read_compress_length(kaddr);
kunmap_local(kaddr);
cur_in += LZO_LEN;
@@ -363,7 +364,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* Go through each lzo segment */
while (cur_in < len_in) {
- struct page *cur_page;
+ struct folio *cur_folio;
/* Length of the compressed segment */
u32 seg_len;
u32 sector_bytes_left;
@@ -375,9 +376,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
*/
ASSERT(cur_in / sectorsize ==
(cur_in + LZO_LEN - 1) / sectorsize);
- cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
- ASSERT(cur_page);
- kaddr = kmap_local_page(cur_page);
+ cur_folio = cb->compressed_folios[cur_in / PAGE_SIZE];
+ ASSERT(cur_folio);
+ kaddr = kmap_local_folio(cur_folio, 0);
seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
kunmap_local(kaddr);
cur_in += LZO_LEN;
diff --git a/fs/btrfs/messages.c b/fs/btrfs/messages.c
index c96dd66fd0f7..210d9c82e2ae 100644
--- a/fs/btrfs/messages.c
+++ b/fs/btrfs/messages.c
@@ -7,7 +7,7 @@
#ifdef CONFIG_PRINTK
-#define STATE_STRING_PREFACE ": state "
+#define STATE_STRING_PREFACE " state "
#define STATE_STRING_BUF_LEN (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT + 1)
/*
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b749ba45da2b..c5bdd674f55c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -294,6 +294,12 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
spin_unlock_irq(&inode->ordered_tree_lock);
}
+void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered)
+{
+ if (!test_and_set_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+ mapping_set_error(ordered->inode->i_mapping, -EIO);
+}
+
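
A userspace analogue of the once-only reporting above, sketched with C11 atomics: test-and-set returns the prior state, so only the first caller reports.

	#include <stdatomic.h>

	static atomic_flag ioerr = ATOMIC_FLAG_INIT;

	static void mark_error_sketch(void (*report)(void))
	{
		if (!atomic_flag_test_and_set(&ioerr))
			report();	/* first failure only */
	}
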
static void finish_ordered_fn(struct btrfs_work *work)
{
struct btrfs_ordered_extent *ordered_extent;
@@ -332,7 +338,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
if (WARN_ON_ONCE(len > ordered->bytes_left)) {
btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
- inode->root->root_key.objectid, btrfs_ino(inode),
+ btrfs_root_id(inode->root), btrfs_ino(inode),
ordered->file_offset, ordered->num_bytes,
len, ordered->bytes_left);
ordered->bytes_left = 0;
@@ -1188,6 +1194,7 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
ordered->disk_bytenr += len;
ordered->num_bytes -= len;
ordered->disk_num_bytes -= len;
+ ordered->ram_bytes -= len;
if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
ASSERT(ordered->bytes_left == 0);
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 34413fc5b4bd..b6f6c6b91732 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -203,6 +203,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
struct extent_state **cached_state);
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
struct btrfs_ordered_extent *ordered, u64 len);
+void btrfs_mark_ordered_extent_error(struct btrfs_ordered_extent *ordered);
int __init ordered_data_init(void);
void __cold ordered_data_exit(void);
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 2a9b7b029eeb..155570e20f45 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -268,7 +268,7 @@ static void inode_prop_iterator(void *ctx,
btrfs_warn(root->fs_info,
"error applying prop %s to ino %llu (root %llu): %d",
handler->xattr_name, btrfs_ino(BTRFS_I(inode)),
- root->root_key.objectid, ret);
+ btrfs_root_id(root), ret);
else
set_bit(BTRFS_INODE_HAS_PROPS, &BTRFS_I(inode)->runtime_flags);
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5f90f0605b12..eb28141d5c37 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1342,16 +1342,10 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
lockdep_assert_held_write(&fs_info->subvol_sem);
/*
- * Lock the cleaner mutex to prevent races with concurrent relocation,
- * because relocation may be building backrefs for blocks of the quota
- * root while we are deleting the root. This is like dropping fs roots
- * of deleted snapshots/subvolumes, we need the same protection.
- *
- * This also prevents races between concurrent tasks trying to disable
- * quotas, because we will unlock and relock qgroup_ioctl_lock across
- * BTRFS_FS_QUOTA_ENABLED changes.
+ * Relocation will mess with backrefs, so make sure we have the
+ * cleaner_mutex held to protect us from relocation.
*/
- mutex_lock(&fs_info->cleaner_mutex);
+ lockdep_assert_held(&fs_info->cleaner_mutex);
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
@@ -1373,9 +1367,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
btrfs_qgroup_wait_for_completion(fs_info, false);
+ /*
+ * We have nothing held here and no trans handle, just return the error
+ * if there is one.
+ */
ret = flush_reservations(fs_info);
if (ret)
- goto out_unlock_cleaner;
+ return ret;
/*
* 1 For the root item
@@ -1439,9 +1437,6 @@ out:
btrfs_end_transaction(trans);
else if (trans)
ret = btrfs_commit_transaction(trans);
-out_unlock_cleaner:
- mutex_unlock(&fs_info->cleaner_mutex);
-
return ret;
}
@@ -1541,18 +1536,15 @@ static int quick_update_accounting(struct btrfs_fs_info *fs_info,
{
struct btrfs_qgroup *qgroup;
int ret = 1;
- int err = 0;
qgroup = find_qgroup_rb(fs_info, src);
if (!qgroup)
goto out;
if (qgroup->excl == qgroup->rfer) {
- ret = 0;
- err = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
- if (err < 0) {
- ret = err;
+ ret = __qgroup_excl_accounting(fs_info, dst, qgroup, sign);
+ if (ret < 0)
goto out;
- }
+ ret = 0;
}
out:
if (ret)
@@ -3050,6 +3042,8 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup_inherit *inherit,
size_t size)
{
+ if (!btrfs_qgroup_enabled(fs_info))
+ return 0;
if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP)
return -EOPNOTSUPP;
if (size < sizeof(*inherit) || size > PAGE_SIZE)
@@ -3067,9 +3061,6 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info,
if (inherit->num_ref_copies > 0 || inherit->num_excl_copies > 0)
return -EINVAL;
- if (inherit->num_qgroups > PAGE_SIZE)
- return -EINVAL;
-
if (size != struct_size(inherit, qgroups, inherit->num_qgroups))
return -EINVAL;
@@ -3132,7 +3123,7 @@ static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
qgids = res->qgroups;
list_for_each_entry(qg_list, &inode_qg->groups, next_group)
- qgids[i] = qg_list->group->qgroupid;
+ qgids[i++] = qg_list->group->qgroupid;
*inherit = res;
return 0;
@@ -3474,7 +3465,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
{
struct btrfs_qgroup *qgroup;
struct btrfs_fs_info *fs_info = root->fs_info;
- u64 ref_root = root->root_key.objectid;
+ u64 ref_root = btrfs_root_id(root);
int ret = 0;
LIST_HEAD(qgroup_list);
@@ -3709,7 +3700,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
qgroup_rescan_work);
struct btrfs_path *path;
struct btrfs_trans_handle *trans = NULL;
- int err = -ENOMEM;
int ret = 0;
bool stopped = false;
bool did_leaf_rescans = false;
@@ -3718,8 +3708,10 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
return;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ ret = -ENOMEM;
goto out;
+ }
/*
* Rescan should only search for commit root, and any later difference
* should be recorded by qgroup
@@ -3727,18 +3719,17 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
path->search_commit_root = 1;
path->skip_locking = 1;
- err = 0;
- while (!err && !(stopped = rescan_should_stop(fs_info))) {
+ while (!ret && !(stopped = rescan_should_stop(fs_info))) {
trans = btrfs_start_transaction(fs_info->fs_root, 0);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
break;
}
- err = qgroup_rescan_leaf(trans, path);
+ ret = qgroup_rescan_leaf(trans, path);
did_leaf_rescans = true;
- if (err > 0)
+ if (ret > 0)
btrfs_commit_transaction(trans);
else
btrfs_end_transaction(trans);
@@ -3748,10 +3739,10 @@ out:
btrfs_free_path(path);
mutex_lock(&fs_info->qgroup_rescan_lock);
- if (err > 0 &&
+ if (ret > 0 &&
fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
- } else if (err < 0 || stopped) {
+ } else if (ret < 0 || stopped) {
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
}
mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -3766,11 +3757,11 @@ out:
if (did_leaf_rescans) {
trans = btrfs_start_transaction(fs_info->quota_root, 1);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
+ ret = PTR_ERR(trans);
trans = NULL;
btrfs_err(fs_info,
"fail to start transaction for status update: %d",
- err);
+ ret);
}
} else {
trans = NULL;
@@ -3781,11 +3772,11 @@ out:
fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN)
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
if (trans) {
- ret = update_qgroup_status_item(trans);
- if (ret < 0) {
- err = ret;
- btrfs_err(fs_info, "fail to update qgroup status: %d",
- err);
+ int ret2 = update_qgroup_status_item(trans);
+
+ if (ret2 < 0) {
+ ret = ret2;
+ btrfs_err(fs_info, "fail to update qgroup status: %d", ret);
}
}
fs_info->qgroup_rescan_running = false;
@@ -3802,11 +3793,11 @@ out:
btrfs_info(fs_info, "qgroup scan paused");
} else if (fs_info->qgroup_flags & BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN) {
btrfs_info(fs_info, "qgroup scan cancelled");
- } else if (err >= 0) {
+ } else if (ret >= 0) {
btrfs_info(fs_info, "qgroup scan completed%s",
- err > 0 ? " (inconsistency flag cleared)" : "");
+ ret > 0 ? " (inconsistency flag cleared)" : "");
} else {
- btrfs_err(fs_info, "qgroup scan failed with %d", err);
+ btrfs_err(fs_info, "qgroup scan failed with %d", ret);
}
}
@@ -4115,7 +4106,7 @@ static int qgroup_reserve_data(struct btrfs_inode *inode,
int ret;
if (btrfs_qgroup_mode(root->fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(root->root_key.objectid) || len == 0)
+ !is_fstree(btrfs_root_id(root)) || len == 0)
return 0;
/* @reserved parameter is mandatory for qgroup */
@@ -4231,7 +4222,7 @@ static int qgroup_free_reserved_data(struct btrfs_inode *inode,
goto out;
freed += changeset.bytes_changed;
}
- btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
+ btrfs_qgroup_free_refroot(root->fs_info, btrfs_root_id(root), freed,
BTRFS_QGROUP_RSV_DATA);
if (freed_ret)
*freed_ret = freed;
@@ -4272,7 +4263,7 @@ static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
changeset.bytes_changed, trace_op);
if (free)
btrfs_qgroup_free_refroot(inode->root->fs_info,
- inode->root->root_key.objectid,
+ btrfs_root_id(inode->root),
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
if (released)
*released = changeset.bytes_changed;
@@ -4367,7 +4358,7 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
int ret;
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(root->root_key.objectid) || num_bytes == 0)
+ !is_fstree(btrfs_root_id(root)) || num_bytes == 0)
return 0;
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
@@ -4412,13 +4403,13 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(root->root_key.objectid))
+ !is_fstree(btrfs_root_id(root)))
return;
/* TODO: Update trace point to handle such free */
trace_qgroup_meta_free_all_pertrans(root);
/* Special value -1 means to free all reserved space */
- btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
+ btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), (u64)-1,
BTRFS_QGROUP_RSV_META_PERTRANS);
}
@@ -4428,7 +4419,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(root->root_key.objectid))
+ !is_fstree(btrfs_root_id(root)))
return;
/*
@@ -4439,8 +4430,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
num_bytes = sub_root_meta_rsv(root, num_bytes, type);
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
- btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
- num_bytes, type);
+ btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(root), num_bytes, type);
}
static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
@@ -4488,13 +4478,15 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED ||
- !is_fstree(root->root_key.objectid))
+ !is_fstree(btrfs_root_id(root)))
return;
/* Same as btrfs_qgroup_free_meta_prealloc() */
num_bytes = sub_root_meta_rsv(root, num_bytes,
BTRFS_QGROUP_RSV_META_PREALLOC);
trace_qgroup_meta_convert(root, num_bytes);
- qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
+ qgroup_convert_meta(fs_info, btrfs_root_id(root), num_bytes);
+ if (!sb_rdonly(fs_info->sb))
+ add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
}
/*
@@ -4521,7 +4513,7 @@ void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
btrfs_ino(inode), unode->val, unode->aux);
}
btrfs_qgroup_free_refroot(inode->root->fs_info,
- inode->root->root_key.objectid,
+ btrfs_root_id(inode->root),
changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
}
@@ -4707,7 +4699,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
- if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
+ if (!is_fstree(btrfs_root_id(root)) || !root->reloc_root)
return 0;
spin_lock(&blocks->lock);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 6f4a9cfeea44..831fac45e70f 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -331,12 +331,11 @@ static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
static void merge_rbio(struct btrfs_raid_bio *dest,
struct btrfs_raid_bio *victim)
{
- bio_list_merge(&dest->bio_list, &victim->bio_list);
+ bio_list_merge_init(&dest->bio_list, &victim->bio_list);
dest->bio_list_bytes += victim->bio_list_bytes;
/* Also inherit the bitmaps from @victim. */
bitmap_or(&dest->dbitmap, &victim->dbitmap, &dest->dbitmap,
dest->stripe_nsectors);
- bio_list_init(&victim->bio_list);
}
/*
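
The helper swapped in above is, to the best of my understanding, equivalent to the open-coded pair it replaces; a sketch of its semantics:

	/* Sketch: merge src into dst, then reinitialize src so the victim
	 * rbio no longer owns any bios (what the removed bio_list_init did). */
	static inline void bio_list_merge_init_sketch(struct bio_list *dst,
						      struct bio_list *src)
	{
		bio_list_merge(dst, src);
		bio_list_init(src);
	}
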
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index 8c4fc98ca9ce..cf531255ab76 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -673,7 +673,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
int ret = 0;
bool metadata;
u64 bytenr = generic_ref->bytenr;
- u64 num_bytes = generic_ref->len;
+ u64 num_bytes = generic_ref->num_bytes;
u64 parent = generic_ref->parent;
u64 ref_root = 0;
u64 owner = 0;
@@ -684,11 +684,11 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
if (generic_ref->type == BTRFS_REF_METADATA) {
if (!parent)
- ref_root = generic_ref->tree_ref.ref_root;
+ ref_root = generic_ref->ref_root;
owner = generic_ref->tree_ref.level;
} else if (!parent) {
- ref_root = generic_ref->data_ref.ref_root;
- owner = generic_ref->data_ref.ino;
+ ref_root = generic_ref->ref_root;
+ owner = generic_ref->data_ref.objectid;
offset = generic_ref->data_ref.offset;
}
metadata = owner < BTRFS_FIRST_FREE_OBJECTID;
diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c
index 08d0fb46ceec..d0a3fcecc46a 100644
--- a/fs/btrfs/reflink.c
+++ b/fs/btrfs/reflink.c
@@ -616,35 +616,6 @@ out:
return ret;
}
-static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
-{
- unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1, NULL);
- unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1, NULL);
-}
-
-static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
-{
- u64 range1_end = loff1 + len - 1;
- u64 range2_end = loff2 + len - 1;
-
- if (inode1 < inode2) {
- swap(inode1, inode2);
- swap(loff1, loff2);
- swap(range1_end, range2_end);
- } else if (inode1 == inode2 && loff2 < loff1) {
- swap(loff1, loff2);
- swap(range1_end, range2_end);
- }
-
- lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end, NULL);
- lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end, NULL);
-
- btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end);
- btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end);
-}
-
static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
{
if (inode1 < inode2)
@@ -662,17 +633,21 @@ static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
struct inode *dst, u64 dst_loff)
{
+ const u64 end = dst_loff + len - 1;
+ struct extent_state *cached_state = NULL;
struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
const u64 bs = fs_info->sectorsize;
int ret;
/*
- * Lock destination range to serialize with concurrent readahead() and
- * source range to serialize with relocation.
+ * Lock destination range to serialize with concurrent readahead(), and
+ * we are safe from concurrency with relocation of source extents
+ * because we have already locked the inode's i_mmap_lock in exclusive
+ * mode.
*/
- btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
+ lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, end, &cached_state);
ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
- btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
+ unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, end, &cached_state);
btrfs_btree_balance_dirty(fs_info);
@@ -690,7 +665,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
if (root_dst->send_in_progress) {
btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
- root_dst->root_key.objectid,
+ btrfs_root_id(root_dst),
root_dst->send_in_progress);
spin_unlock(&root_dst->root_item_lock);
return -EAGAIN;
@@ -724,6 +699,7 @@ out:
static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
u64 off, u64 olen, u64 destoff)
{
+ struct extent_state *cached_state = NULL;
struct inode *inode = file_inode(file);
struct inode *src = file_inode(file_src);
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
@@ -731,6 +707,7 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
int wb_ret;
u64 len = olen;
u64 bs = fs_info->sectorsize;
+ u64 end;
/*
* VFS's generic_remap_file_range_prep() protects us from cloning the
@@ -763,12 +740,15 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
}
/*
- * Lock destination range to serialize with concurrent readahead() and
- * source range to serialize with relocation.
+ * Lock destination range to serialize with concurrent readahead(), and
+ * we are safe from concurrency with relocation of source extents
+ * because we have already locked the inode's i_mmap_lock in exclusive
+ * mode.
*/
- btrfs_double_extent_lock(src, off, inode, destoff, len);
+ end = destoff + len - 1;
+ lock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
- btrfs_double_extent_unlock(src, off, inode, destoff, len);
+ unlock_extent(&BTRFS_I(inode)->io_tree, destoff, end, &cached_state);
/*
* We may have copied an inline extent into a page of the destination
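The double extent locking can be dropped because both reflink entry points run under btrfs_double_mmap_lock(), and relocation (see the replace_file_extents hunks below) now takes the same lock shared before touching file extents. A sketch of the resulting protocol, with names taken from these hunks:

	/* Reflink side: i_mmap_lock already held exclusive on both inodes. */
	lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, end, &cached_state);

	/* Relocation side: back off if a reflink holds the lock. */
	if (!down_read_trylock(&inode->i_mmap_lock))
		continue;	/* revisit this extent item later */

Only the destination range still needs an extent lock, to serialize with readahead.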
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index f96f267fb4aa..8b24bb5a0aa1 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -473,20 +473,19 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
struct btrfs_backref_node *node = NULL;
struct btrfs_backref_edge *edge;
int ret;
- int err = 0;
iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
if (!iter)
return ERR_PTR(-ENOMEM);
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
node = btrfs_backref_alloc_node(cache, bytenr, level);
if (!node) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out;
}
@@ -497,10 +496,9 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
do {
ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
node_key, cur);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
+
edge = list_first_entry_or_null(&cache->pending_edge,
struct btrfs_backref_edge, list[UPPER]);
/*
@@ -515,10 +513,8 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
/* Finish the upper linkage of newly added edges/nodes */
ret = btrfs_backref_finish_upper_links(cache, node);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
goto out;
- }
if (handle_useless_nodes(rc, node))
node = NULL;
@@ -526,9 +522,9 @@ out:
btrfs_free_path(iter->path);
kfree(iter);
btrfs_free_path(path);
- if (err) {
+ if (ret) {
btrfs_backref_error_cleanup(cache, node);
- return ERR_PTR(err);
+ return ERR_PTR(ret);
}
ASSERT(!node || !node->detached);
ASSERT(list_empty(&cache->useless_node) &&
@@ -754,7 +750,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
root_key.type = BTRFS_ROOT_ITEM_KEY;
root_key.offset = objectid;
- if (root->root_key.objectid == objectid) {
+ if (btrfs_root_id(root) == objectid) {
u64 commit_root_gen;
/* called by btrfs_init_reloc_root */
@@ -798,7 +794,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
btrfs_set_root_level(root_item, btrfs_header_level(eb));
btrfs_set_root_generation(root_item, trans->transid);
- if (root->root_key.objectid == objectid) {
+ if (btrfs_root_id(root) == objectid) {
btrfs_set_root_refs(root_item, 0);
memset(&root_item->drop_progress, 0,
sizeof(struct btrfs_disk_key));
@@ -876,8 +872,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
* We are merging reloc roots, we do not need new reloc trees. Also
* reloc trees never need their own reloc tree.
*/
- if (!rc->create_reloc_tree ||
- root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ if (!rc->create_reloc_tree || btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
return 0;
if (!trans->reloc_reserved) {
@@ -885,7 +880,7 @@ int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
trans->block_rsv = rc->block_rsv;
clear_rsv = 1;
}
- reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
+ reloc_root = create_reloc_root(trans, root, btrfs_root_id(root));
if (clear_rsv)
trans->block_rsv = rsv;
if (IS_ERR(reloc_root))
@@ -952,60 +947,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
}
/*
- * helper to find first cached inode with inode number >= objectid
- * in a subvolume
- */
-static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
-{
- struct rb_node *node;
- struct rb_node *prev;
- struct btrfs_inode *entry;
- struct inode *inode;
-
- spin_lock(&root->inode_lock);
-again:
- node = root->inode_tree.rb_node;
- prev = NULL;
- while (node) {
- prev = node;
- entry = rb_entry(node, struct btrfs_inode, rb_node);
-
- if (objectid < btrfs_ino(entry))
- node = node->rb_left;
- else if (objectid > btrfs_ino(entry))
- node = node->rb_right;
- else
- break;
- }
- if (!node) {
- while (prev) {
- entry = rb_entry(prev, struct btrfs_inode, rb_node);
- if (objectid <= btrfs_ino(entry)) {
- node = prev;
- break;
- }
- prev = rb_next(prev);
- }
- }
- while (node) {
- entry = rb_entry(node, struct btrfs_inode, rb_node);
- inode = igrab(&entry->vfs_inode);
- if (inode) {
- spin_unlock(&root->inode_lock);
- return inode;
- }
-
- objectid = btrfs_ino(entry) + 1;
- if (cond_resched_lock(&root->inode_lock))
- goto again;
-
- node = rb_next(node);
- }
- spin_unlock(&root->inode_lock);
- return NULL;
-}
-
-/*
* get new location of data
*/
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
@@ -1065,7 +1006,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
- struct inode *inode = NULL;
+ struct btrfs_inode *inode = NULL;
u64 parent;
u64 bytenr;
u64 new_bytenr = 0;
@@ -1081,7 +1022,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
return 0;
/* reloc trees always use full backref */
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID)
parent = leaf->start;
else
parent = 0;
@@ -1110,15 +1051,15 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
* if we are modifying block in fs tree, wait for read_folio
* to complete and drop the extent cache
*/
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
if (first) {
- inode = find_next_inode(root, key.objectid);
+ inode = btrfs_find_first_inode(root, key.objectid);
first = 0;
- } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
- btrfs_add_delayed_iput(BTRFS_I(inode));
- inode = find_next_inode(root, key.objectid);
+ } else if (inode && btrfs_ino(inode) < key.objectid) {
+ btrfs_add_delayed_iput(inode);
+ inode = btrfs_find_first_inode(root, key.objectid);
}
- if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
+ if (inode && btrfs_ino(inode) == key.objectid) {
struct extent_state *cached_state = NULL;
end = key.offset +
@@ -1127,16 +1068,20 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
fs_info->sectorsize));
WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
end--;
- ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
- key.offset, end,
- &cached_state);
- if (!ret)
+ /* Take mmap lock to serialize with reflinks. */
+ if (!down_read_trylock(&inode->i_mmap_lock))
+ continue;
+ ret = try_lock_extent(&inode->io_tree, key.offset,
+ end, &cached_state);
+ if (!ret) {
+ up_read(&inode->i_mmap_lock);
continue;
+ }
- btrfs_drop_extent_map_range(BTRFS_I(inode),
- key.offset, end, true);
- unlock_extent(&BTRFS_I(inode)->io_tree,
- key.offset, end, &cached_state);
+ btrfs_drop_extent_map_range(inode, key.offset, end, true);
+ unlock_extent(&inode->io_tree, key.offset, end,
+ &cached_state);
+ up_read(&inode->i_mmap_lock);
}
}
@@ -1154,22 +1099,28 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
- num_bytes, parent, root->root_key.objectid);
- btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
- key.objectid, key.offset,
- root->root_key.objectid, false);
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.bytenr = new_bytenr;
+ ref.num_bytes = num_bytes;
+ ref.parent = parent;
+ ref.owning_root = btrfs_root_id(root);
+ ref.ref_root = btrfs_header_owner(leaf);
+ btrfs_init_data_ref(&ref, key.objectid, key.offset,
+ btrfs_root_id(root), false);
ret = btrfs_inc_extent_ref(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
break;
}
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
- num_bytes, parent, root->root_key.objectid);
- btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
- key.objectid, key.offset,
- root->root_key.objectid, false);
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.bytenr = bytenr;
+ ref.num_bytes = num_bytes;
+ ref.parent = parent;
+ ref.owning_root = btrfs_root_id(root);
+ ref.ref_root = btrfs_header_owner(leaf);
+ btrfs_init_data_ref(&ref, key.objectid, key.offset,
+ btrfs_root_id(root), false);
ret = btrfs_free_extent(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -1179,7 +1130,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
if (dirty)
btrfs_mark_buffer_dirty(trans, leaf);
if (inode)
- btrfs_add_delayed_iput(BTRFS_I(inode));
+ btrfs_add_delayed_iput(inode);
return ret;
}
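The series also retires btrfs_init_generic_ref() in favor of initializing struct btrfs_ref directly; btrfs_init_data_ref() and btrfs_init_tree_ref() drop their ref_root argument since the caller now assigns ref.ref_root itself. The new pattern, as a sketch with the field names visible in these hunks:

	struct btrfs_ref ref = {
		.action      = BTRFS_ADD_DELAYED_REF,
		.bytenr      = new_bytenr,
		.num_bytes   = num_bytes,
		.parent      = parent,
		.owning_root = btrfs_root_id(root),
		.ref_root    = btrfs_header_owner(leaf),
	};

	btrfs_init_data_ref(&ref, key.objectid, key.offset, btrfs_root_id(root), false);
	ret = btrfs_inc_extent_ref(trans, &ref);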
@@ -1225,8 +1176,8 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
int ret;
int slot;
- ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
- ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(btrfs_root_id(src) == BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(btrfs_root_id(dest) != BTRFS_TREE_RELOC_OBJECTID);
last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
@@ -1378,20 +1329,26 @@ again:
path->slots[level], old_ptr_gen);
btrfs_mark_buffer_dirty(trans, path->nodes[level]);
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
- blocksize, path->nodes[level]->start,
- src->root_key.objectid);
- btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
- 0, true);
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.bytenr = old_bytenr;
+ ref.num_bytes = blocksize;
+ ref.parent = path->nodes[level]->start;
+ ref.owning_root = btrfs_root_id(src);
+ ref.ref_root = btrfs_root_id(src);
+ btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_inc_extent_ref(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
break;
}
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
- blocksize, 0, dest->root_key.objectid);
- btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
- true);
+
+ ref.action = BTRFS_ADD_DELAYED_REF;
+ ref.bytenr = new_bytenr;
+ ref.num_bytes = blocksize;
+ ref.parent = 0;
+ ref.owning_root = btrfs_root_id(dest);
+ ref.ref_root = btrfs_root_id(dest);
+ btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_inc_extent_ref(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -1399,10 +1356,13 @@ again:
}
/* We don't know the real owning_root, use 0. */
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
- blocksize, path->nodes[level]->start, 0);
- btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
- 0, true);
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.bytenr = new_bytenr;
+ ref.num_bytes = blocksize;
+ ref.parent = path->nodes[level]->start;
+ ref.owning_root = 0;
+ ref.ref_root = btrfs_root_id(src);
+ btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_free_extent(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -1410,10 +1370,13 @@ again:
}
/* We don't know the real owning_root, use 0. */
- btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
- blocksize, 0, 0);
- btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
- 0, true);
+ ref.action = BTRFS_DROP_DELAYED_REF;
+ ref.bytenr = old_bytenr;
+ ref.num_bytes = blocksize;
+ ref.parent = 0;
+ ref.owning_root = 0;
+ ref.ref_root = btrfs_root_id(dest);
+ btrfs_init_tree_ref(&ref, level - 1, 0, true);
ret = btrfs_free_extent(trans, &ref);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -1521,7 +1484,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
const struct btrfs_key *max_key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
- struct inode *inode = NULL;
+ struct btrfs_inode *inode = NULL;
u64 objectid;
u64 start, end;
u64 ino;
@@ -1531,23 +1494,24 @@ static int invalidate_extent_cache(struct btrfs_root *root,
struct extent_state *cached_state = NULL;
cond_resched();
- iput(inode);
+ if (inode)
+ iput(&inode->vfs_inode);
if (objectid > max_key->objectid)
break;
- inode = find_next_inode(root, objectid);
+ inode = btrfs_find_first_inode(root, objectid);
if (!inode)
break;
- ino = btrfs_ino(BTRFS_I(inode));
+ ino = btrfs_ino(inode);
if (ino > max_key->objectid) {
- iput(inode);
+ iput(&inode->vfs_inode);
break;
}
objectid = ino + 1;
- if (!S_ISREG(inode->i_mode))
+ if (!S_ISREG(inode->vfs_inode.i_mode))
continue;
if (unlikely(min_key->objectid == ino)) {
@@ -1580,9 +1544,9 @@ static int invalidate_extent_cache(struct btrfs_root *root,
}
/* the lock_extent waits for read_folio to complete */
- lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
- btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
- unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
+ lock_extent(&inode->io_tree, start, end, &cached_state);
+ btrfs_drop_extent_map_range(inode, start, end, true);
+ unlock_extent(&inode->io_tree, start, end, &cached_state);
}
return 0;
}
@@ -1617,7 +1581,7 @@ static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
int ret;
/* @root must be a subvolume tree root with a valid reloc tree */
- ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
+ ASSERT(btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID);
ASSERT(reloc_root);
reloc_root_item = &reloc_root->root_item;
@@ -1646,7 +1610,7 @@ static int clean_dirty_subvols(struct reloc_control *rc)
list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
reloc_dirty_list) {
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID) {
/* Merged subvolume, cleanup its reloc root */
struct btrfs_root *reloc_root = root->reloc_root;
@@ -1921,13 +1885,13 @@ again:
if (root->reloc_root) {
btrfs_err(fs_info,
"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu",
- root->root_key.objectid,
- root->reloc_root->root_key.objectid,
+ btrfs_root_id(root),
+ btrfs_root_id(root->reloc_root),
root->reloc_root->root_key.type,
root->reloc_root->root_key.offset,
btrfs_root_generation(
&root->reloc_root->root_item),
- reloc_root->root_key.objectid,
+ btrfs_root_id(reloc_root),
reloc_root->root_key.type,
reloc_root->root_key.offset,
btrfs_root_generation(
@@ -1935,8 +1899,8 @@ again:
} else {
btrfs_err(fs_info,
"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu",
- root->root_key.objectid,
- reloc_root->root_key.objectid,
+ btrfs_root_id(root),
+ btrfs_root_id(reloc_root),
reloc_root->root_key.type,
reloc_root->root_key.offset,
btrfs_root_generation(
@@ -2193,7 +2157,7 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
return ERR_PTR(-EUCLEAN);
}
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID) {
ret = record_reloc_root_in_trans(trans, root);
if (ret)
return ERR_PTR(ret);
@@ -2300,7 +2264,7 @@ struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return root;
- if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
+ if (btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID)
fs_root = root;
if (next != node)
@@ -2316,9 +2280,8 @@ struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
return fs_root;
}
-static noinline_for_stack
-u64 calcu_metadata_size(struct reloc_control *rc,
- struct btrfs_backref_node *node, int reserve)
+static noinline_for_stack u64 calcu_metadata_size(struct reloc_control *rc,
+ struct btrfs_backref_node *node)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct btrfs_backref_node *next = node;
@@ -2327,12 +2290,12 @@ u64 calcu_metadata_size(struct reloc_control *rc,
u64 num_bytes = 0;
int index = 0;
- BUG_ON(reserve && node->processed);
+ BUG_ON(node->processed);
while (next) {
cond_resched();
while (1) {
- if (next->processed && (reserve || next != node))
+ if (next->processed)
break;
num_bytes += fs_info->nodesize;
@@ -2360,7 +2323,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
int ret;
u64 tmp;
- num_bytes = calcu_metadata_size(rc, node, 1) * 2;
+ num_bytes = calcu_metadata_size(rc, node) * 2;
trans->block_rsv = rc->block_rsv;
rc->reserved_bytes += num_bytes;
@@ -2423,8 +2386,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
path->lowest_level = node->level + 1;
rc->backref_cache.path[node->level] = node;
list_for_each_entry(edge, &node->upper, list[LOWER]) {
- struct btrfs_ref ref = { 0 };
-
cond_resched();
upper = edge->node[UPPER];
@@ -2512,19 +2473,23 @@ static int do_relocation(struct btrfs_trans_handle *trans,
*/
ASSERT(node->eb == eb);
} else {
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = node->eb->start,
+ .num_bytes = blocksize,
+ .parent = upper->eb->start,
+ .owning_root = btrfs_header_owner(upper->eb),
+ .ref_root = btrfs_header_owner(upper->eb),
+ };
+
btrfs_set_node_blockptr(upper->eb, slot,
node->eb->start);
btrfs_set_node_ptr_generation(upper->eb, slot,
trans->transid);
btrfs_mark_buffer_dirty(trans, upper->eb);
- btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
- node->eb->start, blocksize,
- upper->eb->start,
- btrfs_header_owner(upper->eb));
btrfs_init_tree_ref(&ref, node->level,
- btrfs_header_owner(upper->eb),
- root->root_key.objectid, false);
+ btrfs_root_id(root), false);
ret = btrfs_inc_extent_ref(trans, &ref);
if (!ret)
ret = btrfs_drop_subtree(trans, root, eb,
@@ -2776,12 +2741,11 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct tree_block *block;
struct tree_block *next;
- int ret;
- int err = 0;
+ int ret = 0;
path = btrfs_alloc_path();
if (!path) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto out_free_blocks;
}
@@ -2796,8 +2760,8 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Get first keys */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
if (!block->key_ready) {
- err = get_tree_block_key(fs_info, block);
- if (err)
+ ret = get_tree_block_key(fs_info, block);
+ if (ret)
goto out_free_path;
}
}
@@ -2807,25 +2771,23 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
node = build_backref_tree(trans, rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
- err = PTR_ERR(node);
+ ret = PTR_ERR(node);
goto out;
}
ret = relocate_tree_block(trans, rc, node, &block->key,
path);
- if (ret < 0) {
- err = ret;
+ if (ret < 0)
break;
- }
}
out:
- err = finish_pending_nodes(trans, rc, path, err);
+ ret = finish_pending_nodes(trans, rc, path, ret);
out_free_path:
btrfs_free_path(path);
out_free_blocks:
free_block_list(blocks);
- return err;
+ return ret;
}
static noinline_for_stack int prealloc_file_extent_cluster(
@@ -2850,7 +2812,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
* btrfs_do_readpage() call of previously relocated file cluster.
*
* If the current cluster starts in the above range, btrfs_do_readpage()
- * will skip the read, and relocate_one_page() will later writeback
+ * will skip the read, and relocate_one_folio() will later write back
* the padding zeros as new data, causing data corruption.
*
* Here we have to manually invalidate the range (i_size, PAGE_END + 1).
@@ -2859,7 +2821,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
struct address_space *mapping = inode->vfs_inode.i_mapping;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
const u32 sectorsize = fs_info->sectorsize;
- struct page *page;
+ struct folio *folio;
ASSERT(sectorsize < PAGE_SIZE);
ASSERT(IS_ALIGNED(i_size, sectorsize));
@@ -2890,16 +2852,16 @@ static noinline_for_stack int prealloc_file_extent_cluster(
clear_extent_bits(&inode->io_tree, i_size,
round_up(i_size, PAGE_SIZE) - 1,
EXTENT_UPTODATE);
- page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
+ folio = filemap_lock_folio(mapping, i_size >> PAGE_SHIFT);
/*
* If page is freed we don't need to do anything then, as we
* will re-read the whole page anyway.
*/
- if (page) {
- btrfs_subpage_clear_uptodate(fs_info, page_folio(page), i_size,
+ if (!IS_ERR(folio)) {
+ btrfs_subpage_clear_uptodate(fs_info, folio, i_size,
round_up(i_size, PAGE_SIZE) - i_size);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
}
}
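One behavioral detail of the folio conversion: find_lock_page() returns NULL on a cache miss, while filemap_lock_folio() returns ERR_PTR(-ENOENT), hence the NULL checks becoming IS_ERR() checks. A sketch of the lookup-or-create fallback used in these hunks (nr stands in for the readahead window computed from last_index):

	folio = filemap_lock_folio(mapping, index);	/* ERR_PTR(-ENOENT) on miss */
	if (IS_ERR(folio)) {
		page_cache_sync_readahead(mapping, ra, NULL, index, nr);
		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
		if (IS_ERR(folio))
			return PTR_ERR(folio);	/* typically -ENOMEM */
	}

The WARN_ON(folio_order(folio)) added below documents that this path still assumes order-0 folios.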
@@ -2984,68 +2946,71 @@ static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
return cluster->boundary[cluster_nr + 1] - 1;
}
-static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
- const struct file_extent_cluster *cluster,
- int *cluster_nr, unsigned long page_index)
+static int relocate_one_folio(struct inode *inode, struct file_ra_state *ra,
+ const struct file_extent_cluster *cluster,
+ int *cluster_nr, unsigned long index)
{
struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
u64 offset = BTRFS_I(inode)->index_cnt;
const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
- struct page *page;
- u64 page_start;
- u64 page_end;
+ struct folio *folio;
+ u64 folio_start;
+ u64 folio_end;
u64 cur;
int ret;
- ASSERT(page_index <= last_index);
- page = find_lock_page(inode->i_mapping, page_index);
- if (!page) {
+ ASSERT(index <= last_index);
+ folio = filemap_lock_folio(inode->i_mapping, index);
+ if (IS_ERR(folio)) {
page_cache_sync_readahead(inode->i_mapping, ra, NULL,
- page_index, last_index + 1 - page_index);
- page = find_or_create_page(inode->i_mapping, page_index, mask);
- if (!page)
- return -ENOMEM;
+ index, last_index + 1 - index);
+ folio = __filemap_get_folio(inode->i_mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
}
- if (PageReadahead(page))
+ WARN_ON(folio_order(folio));
+
+ if (folio_test_readahead(folio))
page_cache_async_readahead(inode->i_mapping, ra, NULL,
- page_folio(page), page_index,
- last_index + 1 - page_index);
+ folio, index,
+ last_index + 1 - index);
- if (!PageUptodate(page)) {
- btrfs_read_folio(NULL, page_folio(page));
- lock_page(page);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
+ btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
- goto release_page;
+ goto release_folio;
}
}
/*
- * We could have lost page private when we dropped the lock to read the
- * page above, make sure we set_page_extent_mapped here so we have any
+ * We could have lost folio private when we dropped the lock to read the
+ * folio above, make sure we set_folio_extent_mapped() here so we have any
* of the subpage blocksize stuff we need in place.
*/
- ret = set_page_extent_mapped(page);
+ ret = set_folio_extent_mapped(folio);
if (ret < 0)
- goto release_page;
+ goto release_folio;
- page_start = page_offset(page);
- page_end = page_start + PAGE_SIZE - 1;
+ folio_start = folio_pos(folio);
+ folio_end = folio_start + PAGE_SIZE - 1;
/*
* Start from the cluster, as for subpage case, the cluster can start
- * inside the page.
+ * inside the folio.
*/
- cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
- while (cur <= page_end) {
+ cur = max(folio_start, cluster->boundary[*cluster_nr] - offset);
+ while (cur <= folio_end) {
struct extent_state *cached_state = NULL;
u64 extent_start = cluster->boundary[*cluster_nr] - offset;
u64 extent_end = get_cluster_boundary_end(cluster,
*cluster_nr) - offset;
- u64 clamped_start = max(page_start, extent_start);
- u64 clamped_end = min(page_end, extent_end);
+ u64 clamped_start = max(folio_start, extent_start);
+ u64 clamped_end = min(folio_end, extent_end);
u32 clamped_len = clamped_end + 1 - clamped_start;
/* Reserve metadata for this range */
@@ -3053,7 +3018,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
clamped_len, clamped_len,
false);
if (ret)
- goto release_page;
+ goto release_folio;
/* Mark the range delalloc and dirty for later writeback */
lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
@@ -3069,20 +3034,18 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
clamped_len, true);
btrfs_delalloc_release_extents(BTRFS_I(inode),
clamped_len);
- goto release_page;
+ goto release_folio;
}
- btrfs_folio_set_dirty(fs_info, page_folio(page),
- clamped_start, clamped_len);
+ btrfs_folio_set_dirty(fs_info, folio, clamped_start, clamped_len);
/*
- * Set the boundary if it's inside the page.
+ * Set the boundary if it's inside the folio.
* Data relocation requires the destination extents to have the
* same size as the source.
* EXTENT_BOUNDARY bit prevents current extent from being merged
* with previous extent.
*/
- if (in_range(cluster->boundary[*cluster_nr] - offset,
- page_start, PAGE_SIZE)) {
+ if (in_range(cluster->boundary[*cluster_nr] - offset, folio_start, PAGE_SIZE)) {
u64 boundary_start = cluster->boundary[*cluster_nr] -
offset;
u64 boundary_end = boundary_start +
@@ -3105,8 +3068,8 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
break;
}
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
balance_dirty_pages_ratelimited(inode->i_mapping);
btrfs_throttle(fs_info);
@@ -3114,9 +3077,9 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
ret = -ECANCELED;
return ret;
-release_page:
- unlock_page(page);
- put_page(page);
+release_folio:
+ folio_unlock(folio);
+ folio_put(folio);
return ret;
}
@@ -3151,7 +3114,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
last_index = (cluster->end - offset) >> PAGE_SHIFT;
for (index = (cluster->start - offset) >> PAGE_SHIFT;
index <= last_index && !ret; index++)
- ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
+ ret = relocate_one_folio(inode, ra, cluster, &cluster_nr, index);
if (ret == 0)
WARN_ON(cluster_nr != cluster->nr);
out:
@@ -3928,7 +3891,7 @@ static noinline_for_stack struct inode *create_reloc_inode(
struct btrfs_trans_handle *trans;
struct btrfs_root *root;
u64 objectid;
- int err = 0;
+ int ret = 0;
root = btrfs_grab_root(fs_info->data_reloc_root);
trans = btrfs_start_transaction(root, 6);
@@ -3937,31 +3900,31 @@ static noinline_for_stack struct inode *create_reloc_inode(
return ERR_CAST(trans);
}
- err = btrfs_get_free_objectid(root, &objectid);
- if (err)
+ ret = btrfs_get_free_objectid(root, &objectid);
+ if (ret)
goto out;
- err = __insert_orphan_inode(trans, root, objectid);
- if (err)
+ ret = __insert_orphan_inode(trans, root, objectid);
+ if (ret)
goto out;
inode = btrfs_iget(fs_info->sb, objectid, root);
if (IS_ERR(inode)) {
delete_orphan_inode(trans, root, objectid);
- err = PTR_ERR(inode);
+ ret = PTR_ERR(inode);
inode = NULL;
goto out;
}
BTRFS_I(inode)->index_cnt = group->start;
- err = btrfs_orphan_add(trans, BTRFS_I(inode));
+ ret = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
btrfs_put_root(root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
- if (err) {
+ if (ret) {
iput(inode);
- inode = ERR_PTR(err);
+ inode = ERR_PTR(ret);
}
return inode;
}
@@ -4439,9 +4402,11 @@ int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
disk_bytenr + ordered->num_bytes - 1,
- &list, 0, false);
- if (ret)
+ &list, false);
+ if (ret < 0) {
+ btrfs_mark_ordered_extent_error(ordered);
return ret;
+ }
while (!list_empty(&list)) {
struct btrfs_ordered_sum *sums =
@@ -4491,8 +4456,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
btrfs_root_last_snapshot(&root->root_item))
first_cow = 1;
- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
- rc->create_reloc_tree) {
+ if (btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID && rc->create_reloc_tree) {
WARN_ON(!first_cow && level == 0);
node = rc->backref_cache.path[level];
@@ -4585,8 +4549,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
}
new_root = pending->snap;
- reloc_root = create_reloc_root(trans, root->reloc_root,
- new_root->root_key.objectid);
+ reloc_root = create_reloc_root(trans, root->reloc_root, btrfs_root_id(new_root));
if (IS_ERR(reloc_root))
return PTR_ERR(reloc_root);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 4bb538a372ce..33962671a96c 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -148,8 +148,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
if (ret > 0) {
btrfs_crit(fs_info,
"unable to find root key (%llu %u %llu) in tree %llu",
- key->objectid, key->type, key->offset,
- root->root_key.objectid);
+ key->objectid, key->type, key->offset, btrfs_root_id(root));
ret = -EUCLEAN;
btrfs_abort_transaction(trans, ret);
goto out;
@@ -548,13 +547,3 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
}
return ret;
}
-
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- u64 qgroup_to_release;
-
- btrfs_block_rsv_release(fs_info, rsv, (u64)-1, &qgroup_to_release);
- btrfs_qgroup_convert_reserved_meta(root, qgroup_to_release);
-}
diff --git a/fs/btrfs/root-tree.h b/fs/btrfs/root-tree.h
index 6f929cf3bd49..8f5739e732b9 100644
--- a/fs/btrfs/root-tree.h
+++ b/fs/btrfs/root-tree.h
@@ -18,8 +18,6 @@ struct btrfs_trans_handle;
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
int nitems, bool use_global_rsv);
-void btrfs_subvolume_release_metadata(struct btrfs_root *root,
- struct btrfs_block_rsv *rsv);
int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
u64 ref_id, u64 dirid, u64 sequence,
const struct fscrypt_str *name);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c4bd0e60db59..4b22cfe9a98c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1012,6 +1012,7 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
struct btrfs_fs_info *fs_info = sctx->fs_info;
int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
stripe->bg->length);
+ unsigned long repaired;
int mirror;
int i;
@@ -1078,16 +1079,15 @@ out:
* Submit the repaired sectors. For zoned case, we cannot do repair
* in-place, but queue the bg to be relocated.
*/
- if (btrfs_is_zoned(fs_info)) {
- if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
+ bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
+ stripe->nr_sectors);
+ if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
+ if (btrfs_is_zoned(fs_info)) {
btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
- } else if (!sctx->readonly) {
- unsigned long repaired;
-
- bitmap_andnot(&repaired, &stripe->init_error_bitmap,
- &stripe->error_bitmap, stripe->nr_sectors);
- scrub_write_sectors(sctx, stripe, repaired, false);
- wait_scrub_stripe_io(stripe);
+ } else {
+ scrub_write_sectors(sctx, stripe, repaired, false);
+ wait_scrub_stripe_io(stripe);
+ }
}
scrub_stripe_report_errors(sctx, stripe);
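bitmap_andnot(dst, src1, src2, nbits) computes dst = src1 & ~src2, so "repaired" here is exactly the set of sectors that were bad on the initial read (init_error_bitmap) and are no longer flagged after repair (error_bitmap):

	bitmap_andnot(&repaired, &stripe->init_error_bitmap,
		      &stripe->error_bitmap, stripe->nr_sectors);

Hoisting it out of the non-zoned branch lets the zoned and regular paths share a single "repaired something and not read-only" gate.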
@@ -2812,7 +2812,17 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
gen = btrfs_get_last_trans_committed(fs_info);
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- bytenr = btrfs_sb_offset(i);
+ ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
+ if (ret == -ENOENT)
+ break;
+
+ if (ret) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.super_errors++;
+ spin_unlock(&sctx->stat_lock);
+ continue;
+ }
+
if (bytenr + BTRFS_SUPER_INFO_SIZE >
scrub_dev->commit_total_bytes)
break;
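Switching from the fixed btrfs_sb_offset() to btrfs_sb_log_location() makes superblock scrub work on zoned devices, where superblocks live at zone-aligned log positions rather than fixed offsets. Per the hunk, -ENOENT means the mirror does not exist on this device (stop iterating), while any other error is counted as a super error but the remaining mirrors are still tried:

	ret = btrfs_sb_log_location(scrub_dev, i, 0 /* read */, &bytenr);
	if (ret == -ENOENT)
		break;		/* no such mirror on this device */
	if (ret) {		/* e.g. unexpected zone state */
		/* bump sctx->stat.super_errors under stat_lock */
		continue;
	}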
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 50b4a76ac88e..3dd4a48479a9 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -392,9 +392,8 @@ static void inconsistent_snapshot_error(struct send_ctx *sctx,
btrfs_err(sctx->send_root->fs_info,
"Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
result_string, what, sctx->cmp_key->objectid,
- sctx->send_root->root_key.objectid,
- (sctx->parent_root ?
- sctx->parent_root->root_key.objectid : 0));
+ btrfs_root_id(sctx->send_root),
+ (sctx->parent_root ? btrfs_root_id(sctx->parent_root) : 0));
}
__maybe_unused
@@ -1316,9 +1315,9 @@ static int __clone_root_cmp_bsearch(const void *key, const void *elt)
u64 root = (u64)(uintptr_t)key;
const struct clone_root *cr = elt;
- if (root < cr->root->root_key.objectid)
+ if (root < btrfs_root_id(cr->root))
return -1;
- if (root > cr->root->root_key.objectid)
+ if (root > btrfs_root_id(cr->root))
return 1;
return 0;
}
@@ -1328,9 +1327,9 @@ static int __clone_root_cmp_sort(const void *e1, const void *e2)
const struct clone_root *cr1 = e1;
const struct clone_root *cr2 = e2;
- if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
+ if (btrfs_root_id(cr1->root) < btrfs_root_id(cr2->root))
return -1;
- if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
+ if (btrfs_root_id(cr1->root) > btrfs_root_id(cr2->root))
return 1;
return 0;
}
@@ -1778,7 +1777,7 @@ static int read_symlink(struct btrfs_root *root,
*/
btrfs_err(root->fs_info,
"Found empty symlink inode %llu at root %llu",
- ino, root->root_key.objectid);
+ ino, btrfs_root_id(root));
ret = -EIO;
goto out;
}
@@ -2532,7 +2531,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
return -ENOMEM;
}
- key.objectid = send_root->root_key.objectid;
+ key.objectid = btrfs_root_id(send_root);
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = 0;
@@ -2548,7 +2547,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
if (key.type != BTRFS_ROOT_BACKREF_KEY ||
- key.objectid != send_root->root_key.objectid) {
+ key.objectid != btrfs_root_id(send_root)) {
ret = -ENOENT;
goto out;
}
@@ -5274,10 +5273,11 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
{
struct btrfs_root *root = sctx->send_root;
struct btrfs_fs_info *fs_info = root->fs_info;
- struct page *page;
+ struct folio *folio;
pgoff_t index = offset >> PAGE_SHIFT;
pgoff_t last_index;
unsigned pg_offset = offset_in_page(offset);
+ struct address_space *mapping = sctx->cur_inode->i_mapping;
int ret;
ret = put_data_header(sctx, len);
@@ -5290,44 +5290,44 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
unsigned cur_len = min_t(unsigned, len,
PAGE_SIZE - pg_offset);
- page = find_lock_page(sctx->cur_inode->i_mapping, index);
- if (!page) {
- page_cache_sync_readahead(sctx->cur_inode->i_mapping,
+ folio = filemap_lock_folio(mapping, index);
+ if (IS_ERR(folio)) {
+ page_cache_sync_readahead(mapping,
&sctx->ra, NULL, index,
last_index + 1 - index);
- page = find_or_create_page(sctx->cur_inode->i_mapping,
- index, GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
+ folio = filemap_grab_folio(mapping, index);
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
break;
}
}
- if (PageReadahead(page))
- page_cache_async_readahead(sctx->cur_inode->i_mapping,
- &sctx->ra, NULL, page_folio(page),
+ WARN_ON(folio_order(folio));
+
+ if (folio_test_readahead(folio))
+ page_cache_async_readahead(mapping, &sctx->ra, NULL, folio,
index, last_index + 1 - index);
- if (!PageUptodate(page)) {
- btrfs_read_folio(NULL, page_folio(page));
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
+ if (!folio_test_uptodate(folio)) {
+ btrfs_read_folio(NULL, folio);
+ folio_lock(folio);
+ if (!folio_test_uptodate(folio)) {
+ folio_unlock(folio);
btrfs_err(fs_info,
"send: IO error at offset %llu for inode %llu root %llu",
- page_offset(page), sctx->cur_ino,
- sctx->send_root->root_key.objectid);
- put_page(page);
+ folio_pos(folio), sctx->cur_ino,
+ btrfs_root_id(sctx->send_root));
+ folio_put(folio);
ret = -EIO;
break;
}
}
- memcpy_from_page(sctx->send_buf + sctx->send_size, page,
- pg_offset, cur_len);
- unlock_page(page);
- put_page(page);
+ memcpy_from_folio(sctx->send_buf + sctx->send_size, folio,
+ pg_offset, cur_len);
+ folio_unlock(folio);
+ folio_put(folio);
index++;
pg_offset = 0;
len -= cur_len;
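memcpy_from_folio() (from linux/highmem.h) replaces memcpy_from_page() and handles kmap of highmem folios internally; the copy itself is unchanged:

	memcpy_from_folio(sctx->send_buf + sctx->send_size, folio,
			  pg_offset, cur_len);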
@@ -5388,7 +5388,7 @@ static int send_clone(struct send_ctx *sctx,
btrfs_debug(sctx->send_root->fs_info,
"send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
- offset, len, clone_root->root->root_key.objectid,
+ offset, len, btrfs_root_id(clone_root->root),
clone_root->ino, clone_root->offset);
p = fs_path_alloc();
@@ -7337,7 +7337,7 @@ static int search_key_again(const struct send_ctx *sctx,
"send: key (%llu %u %llu) not found in %s root %llu, lowest_level %d, slot %d",
key->objectid, key->type, key->offset,
(root == sctx->parent_root ? "parent" : "send"),
- root->root_key.objectid, path->lowest_level,
+ btrfs_root_id(root), path->lowest_level,
path->slots[path->lowest_level]);
return -EUCLEAN;
}
@@ -8071,7 +8071,7 @@ static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
if (root->send_in_progress < 0)
btrfs_err(root->fs_info,
"send_in_progress unbalanced %d root %llu",
- root->send_in_progress, root->root_key.objectid);
+ root->send_in_progress, btrfs_root_id(root));
spin_unlock(&root->root_item_lock);
}
@@ -8079,7 +8079,7 @@ static void dedupe_in_progress_warn(const struct btrfs_root *root)
{
btrfs_warn_rl(root->fs_info,
"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
- root->root_key.objectid, root->dedupe_in_progress);
+ btrfs_root_id(root), root->dedupe_in_progress);
}
long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 7e44ccaf348f..2dbc930a20f7 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1097,10 +1097,9 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
#endif
if (btrfs_test_opt(info, REF_VERIFY))
seq_puts(seq, ",ref_verify");
- seq_printf(seq, ",subvolid=%llu",
- BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+ seq_printf(seq, ",subvolid=%llu", btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
subvol_name = btrfs_get_subvol_name_from_objectid(info,
- BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+ btrfs_root_id(BTRFS_I(d_inode(dentry))->root));
if (!IS_ERR(subvol_name)) {
seq_puts(seq, ",subvol=");
seq_escape(seq, subvol_name, " \t\n\\");
@@ -1152,7 +1151,7 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
struct super_block *s = root->d_sb;
struct btrfs_fs_info *fs_info = btrfs_sb(s);
struct inode *root_inode = d_inode(root);
- u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
+ u64 root_objectid = btrfs_root_id(BTRFS_I(root_inode)->root);
ret = 0;
if (!is_subvolume_inode(root_inode)) {
@@ -1774,10 +1773,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]);
buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]);
/* Mask in the root object ID too, to disambiguate subvols */
- buf->f_fsid.val[0] ^=
- BTRFS_I(d_inode(dentry))->root->root_key.objectid >> 32;
- buf->f_fsid.val[1] ^=
- BTRFS_I(d_inode(dentry))->root->root_key.objectid;
+ buf->f_fsid.val[0] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root) >> 32;
+ buf->f_fsid.val[1] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root);
return 0;
}
@@ -2374,6 +2371,24 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
return 0;
}
+static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_control *sc)
+{
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ const s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps);
+
+ trace_btrfs_extent_map_shrinker_count(fs_info, nr);
+
+ return nr;
+}
+
+static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc)
+{
+ const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan);
+ struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+
+ return btrfs_free_extent_maps(fs_info, nr_to_scan);
+}
+
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,
@@ -2387,6 +2402,8 @@ static const struct super_operations btrfs_super_ops = {
.statfs = btrfs_statfs,
.freeze_fs = btrfs_freeze,
.unfreeze_fs = btrfs_unfreeze,
+ .nr_cached_objects = btrfs_nr_cached_objects,
+ .free_cached_objects = btrfs_free_cached_objects,
};
static const struct file_operations btrfs_ctl_fops = {
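These two hooks plug the btrfs extent map shrinker into the generic VFS superblock shrinker: under memory pressure, fs/super.c adds ->nr_cached_objects() to its count of reclaimable objects and later calls ->free_cached_objects() with a scan target. A simplified sketch of that contract, assuming the standard super_operations behavior:

	/* fs/super.c side, simplified: */
	if (sb->s_op->nr_cached_objects)
		total_objects += sb->s_op->nr_cached_objects(sb, sc);
	...
	if (sb->s_op->free_cached_objects)
		freed += sb->s_op->free_cached_objects(sb, sc);

The min_t() clamp in btrfs_free_cached_objects() is needed because sc->nr_to_scan is an unsigned long while btrfs_free_extent_maps() takes a signed long.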
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c6387a8ddb94..af545b6b1190 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -2339,7 +2339,7 @@ int btrfs_sysfs_add_one_qgroup(struct btrfs_fs_info *fs_info,
struct kobject *qgroups_kobj = fs_info->qgroups_kobj;
int ret;
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))
+ if (btrfs_is_testing(fs_info))
return 0;
if (qgroup->kobj.state_initialized)
return 0;
@@ -2360,7 +2360,7 @@ void btrfs_sysfs_del_qgroups(struct btrfs_fs_info *fs_info)
struct btrfs_qgroup *qgroup;
struct btrfs_qgroup *next;
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))
+ if (btrfs_is_testing(fs_info))
return;
rbtree_postorder_for_each_entry_safe(qgroup, next,
@@ -2381,7 +2381,7 @@ int btrfs_sysfs_add_qgroups(struct btrfs_fs_info *fs_info)
struct btrfs_qgroup *next;
int ret = 0;
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))
+ if (btrfs_is_testing(fs_info))
return 0;
ASSERT(fsid_kobj);
@@ -2413,7 +2413,7 @@ out:
void btrfs_sysfs_del_one_qgroup(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qgroup)
{
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state))
+ if (btrfs_is_testing(fs_info))
return;
if (qgroup->kobj.state_initialized) {
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 709c6cc9706a..dce0387ef155 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -160,8 +160,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
if (!fs_info)
return;
- if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
- &fs_info->fs_state)))
+ if (WARN_ON(!btrfs_is_testing(fs_info)))
return;
test_mnt->mnt_sb->s_fs_info = NULL;
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index 253cce7ffecf..ba36794ba2d5 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -11,19 +11,22 @@
#include "../disk-io.h"
#include "../block-group.h"
-static void free_extent_map_tree(struct extent_map_tree *em_tree)
+static int free_extent_map_tree(struct btrfs_inode *inode)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
struct rb_node *node;
+ int ret = 0;
write_lock(&em_tree->lock);
while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) {
node = rb_first_cached(&em_tree->map);
em = rb_entry(node, struct extent_map, rb_node);
- remove_extent_mapping(em_tree, em);
+ remove_extent_mapping(inode, em);
#ifdef CONFIG_BTRFS_DEBUG
if (refcount_read(&em->refs) != 1) {
+ ret = -EINVAL;
test_err(
"em leak: em (start %llu len %llu block_start %llu block_len %llu) refs %d",
em->start, em->len, em->block_start,
@@ -35,6 +38,8 @@ static void free_extent_map_tree(struct extent_map_tree *em_tree)
free_extent_map(em);
}
write_unlock(&em_tree->lock);
+
+ return ret;
}
/*
@@ -53,13 +58,14 @@ static void free_extent_map_tree(struct extent_map_tree *em_tree)
* ->add_extent_mapping(0, 16K)
* -> #handle -EEXIST
*/
-static int test_case_1(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree)
+static int test_case_1(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
u64 start = 0;
u64 len = SZ_8K;
int ret;
+ int ret2;
em = alloc_extent_map();
if (!em) {
@@ -73,7 +79,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info,
em->block_start = 0;
em->block_len = SZ_16K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [0, 16K)");
@@ -94,7 +100,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info,
em->block_start = SZ_32K; /* avoid merging */
em->block_len = SZ_4K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [16K, 20K)");
@@ -115,7 +121,7 @@ static int test_case_1(struct btrfs_fs_info *fs_info,
em->block_start = start;
em->block_len = len;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret) {
test_err("case1 [%llu %llu]: ret %d", start, start + len, ret);
@@ -137,7 +143,9 @@ static int test_case_1(struct btrfs_fs_info *fs_info,
}
free_extent_map(em);
out:
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
@@ -148,11 +156,12 @@ out:
* Reading the inline extent ends up with EEXIST, i.e. read an inline
* extent, discard the page cache, and read it again.
*/
-static int test_case_2(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree)
+static int test_case_2(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
int ret;
+ int ret2;
em = alloc_extent_map();
if (!em) {
@@ -166,7 +175,7 @@ static int test_case_2(struct btrfs_fs_info *fs_info,
em->block_start = EXTENT_MAP_INLINE;
em->block_len = (u64)-1;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [0, 1K)");
@@ -187,7 +196,7 @@ static int test_case_2(struct btrfs_fs_info *fs_info,
em->block_start = SZ_4K;
em->block_len = SZ_4K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [4K, 8K)");
@@ -208,7 +217,7 @@ static int test_case_2(struct btrfs_fs_info *fs_info,
em->block_start = EXTENT_MAP_INLINE;
em->block_len = (u64)-1;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret) {
test_err("case2 [0 1K]: ret %d", ret);
@@ -229,17 +238,21 @@ static int test_case_2(struct btrfs_fs_info *fs_info,
}
free_extent_map(em);
out:
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
static int __test_case_3(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree, u64 start)
+ struct btrfs_inode *inode, u64 start)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
u64 len = SZ_4K;
int ret;
+ int ret2;
em = alloc_extent_map();
if (!em) {
@@ -253,7 +266,7 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
em->block_start = SZ_4K;
em->block_len = SZ_4K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [4K, 8K)");
@@ -274,7 +287,7 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
em->block_start = 0;
em->block_len = SZ_16K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
+ ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
if (ret) {
test_err("case3 [%llu %llu): ret %d",
@@ -301,7 +314,9 @@ static int __test_case_3(struct btrfs_fs_info *fs_info,
}
free_extent_map(em);
out:
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
@@ -322,28 +337,29 @@ out:
* -> add_extent_mapping()
* -> add_extent_mapping()
*/
-static int test_case_3(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree)
+static int test_case_3(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
int ret;
- ret = __test_case_3(fs_info, em_tree, 0);
+ ret = __test_case_3(fs_info, inode, 0);
if (ret)
return ret;
- ret = __test_case_3(fs_info, em_tree, SZ_8K);
+ ret = __test_case_3(fs_info, inode, SZ_8K);
if (ret)
return ret;
- ret = __test_case_3(fs_info, em_tree, (12 * SZ_1K));
+ ret = __test_case_3(fs_info, inode, (12 * SZ_1K));
return ret;
}
static int __test_case_4(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree, u64 start)
+ struct btrfs_inode *inode, u64 start)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
u64 len = SZ_4K;
int ret;
+ int ret2;
em = alloc_extent_map();
if (!em) {
@@ -357,7 +373,7 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
em->block_start = 0;
em->block_len = SZ_8K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [0, 8K)");
@@ -378,7 +394,7 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
em->block_start = SZ_16K; /* avoid merging */
em->block_len = 24 * SZ_1K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("cannot add extent range [8K, 32K)");
@@ -398,7 +414,7 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
em->block_start = 0;
em->block_len = SZ_32K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, start, len);
+ ret = btrfs_add_extent_mapping(inode, &em, start, len);
write_unlock(&em_tree->lock);
if (ret) {
test_err("case4 [%llu %llu): ret %d",
@@ -420,7 +436,9 @@ static int __test_case_4(struct btrfs_fs_info *fs_info,
}
free_extent_map(em);
out:
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
return ret;
}
@@ -450,23 +468,22 @@ out:
* # handle -EEXIST when adding
* # [0, 32K)
*/
-static int test_case_4(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree)
+static int test_case_4(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
int ret;
- ret = __test_case_4(fs_info, em_tree, 0);
+ ret = __test_case_4(fs_info, inode, 0);
if (ret)
return ret;
- ret = __test_case_4(fs_info, em_tree, SZ_4K);
+ ret = __test_case_4(fs_info, inode, SZ_4K);
return ret;
}
-static int add_compressed_extent(struct btrfs_fs_info *fs_info,
- struct extent_map_tree *em_tree,
+static int add_compressed_extent(struct btrfs_inode *inode,
u64 start, u64 len, u64 block_start)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
int ret;
@@ -482,7 +499,7 @@ static int add_compressed_extent(struct btrfs_fs_info *fs_info,
em->block_len = SZ_4K;
em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
free_extent_map(em);
if (ret < 0) {
@@ -588,53 +605,44 @@ static int validate_range(struct extent_map_tree *em_tree, int index)
* They'll have the EXTENT_FLAG_COMPRESSED flag set to keep the em tree from
* merging the em's.
*/
-static int test_case_5(struct btrfs_fs_info *fs_info)
+static int test_case_5(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
- struct extent_map_tree *em_tree;
- struct inode *inode;
u64 start, end;
int ret;
+ int ret2;
test_msg("Running btrfs_drop_extent_map_range tests");
- inode = btrfs_new_test_inode();
- if (!inode) {
- test_std_err(TEST_ALLOC_INODE);
- return -ENOMEM;
- }
-
- em_tree = &BTRFS_I(inode)->extent_tree;
-
/* [0, 12k) */
- ret = add_compressed_extent(fs_info, em_tree, 0, SZ_4K * 3, 0);
+ ret = add_compressed_extent(inode, 0, SZ_4K * 3, 0);
if (ret) {
test_err("cannot add extent range [0, 12K)");
goto out;
}
/* [12k, 24k) */
- ret = add_compressed_extent(fs_info, em_tree, SZ_4K * 3, SZ_4K * 3, SZ_4K);
+ ret = add_compressed_extent(inode, SZ_4K * 3, SZ_4K * 3, SZ_4K);
if (ret) {
test_err("cannot add extent range [12k, 24k)");
goto out;
}
/* [24k, 36k) */
- ret = add_compressed_extent(fs_info, em_tree, SZ_4K * 6, SZ_4K * 3, SZ_8K);
+ ret = add_compressed_extent(inode, SZ_4K * 6, SZ_4K * 3, SZ_8K);
if (ret) {
test_err("cannot add extent range [12k, 24k)");
goto out;
}
/* [36k, 40k) */
- ret = add_compressed_extent(fs_info, em_tree, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
+ ret = add_compressed_extent(inode, SZ_32K + SZ_4K, SZ_4K, SZ_4K * 3);
if (ret) {
test_err("cannot add extent range [12k, 24k)");
goto out;
}
/* [40k, 64k) */
- ret = add_compressed_extent(fs_info, em_tree, SZ_4K * 10, SZ_4K * 6, SZ_16K);
+ ret = add_compressed_extent(inode, SZ_4K * 10, SZ_4K * 6, SZ_16K);
if (ret) {
test_err("cannot add extent range [12k, 24k)");
goto out;
@@ -643,36 +651,39 @@ static int test_case_5(struct btrfs_fs_info *fs_info)
/* Drop [8k, 12k) */
start = SZ_8K;
end = (3 * SZ_4K) - 1;
- btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
- ret = validate_range(&BTRFS_I(inode)->extent_tree, 0);
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = validate_range(&inode->extent_tree, 0);
if (ret)
goto out;
/* Drop [12k, 20k) */
start = SZ_4K * 3;
end = SZ_16K + SZ_4K - 1;
- btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
- ret = validate_range(&BTRFS_I(inode)->extent_tree, 1);
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = validate_range(&inode->extent_tree, 1);
if (ret)
goto out;
/* Drop [28k, 32k) */
start = SZ_32K - SZ_4K;
end = SZ_32K - 1;
- btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
- ret = validate_range(&BTRFS_I(inode)->extent_tree, 2);
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = validate_range(&inode->extent_tree, 2);
if (ret)
goto out;
/* Drop [32k, 64k) */
start = SZ_32K;
end = SZ_64K - 1;
- btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, false);
- ret = validate_range(&BTRFS_I(inode)->extent_tree, 3);
+ btrfs_drop_extent_map_range(inode, start, end, false);
+ ret = validate_range(&inode->extent_tree, 3);
if (ret)
goto out;
out:
- iput(inode);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
+
return ret;
}
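The extent map tests now funnel cleanup through free_extent_map_tree(), which returns -EINVAL when an extent map is still referenced, so a leak fails the test even if its own assertions passed. The recurring ret/ret2 pairing preserves the first error:

	ret2 = free_extent_map_tree(inode);
	if (ret == 0)
		ret = ret2;	/* surface the leak only if the test itself passed */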
@@ -681,23 +692,26 @@ out:
* for areas between two existing ems. Validate it doesn't do this when there
* are two unmerged em's side by side.
*/
-static int test_case_6(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree)
+static int test_case_6(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em = NULL;
int ret;
+ int ret2;
- ret = add_compressed_extent(fs_info, em_tree, 0, SZ_4K, 0);
+ ret = add_compressed_extent(inode, 0, SZ_4K, 0);
if (ret)
goto out;
- ret = add_compressed_extent(fs_info, em_tree, SZ_4K, SZ_4K, 0);
+ ret = add_compressed_extent(inode, SZ_4K, SZ_4K, 0);
if (ret)
goto out;
em = alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
em->start = SZ_4K;
@@ -705,7 +719,7 @@ static int test_case_6(struct btrfs_fs_info *fs_info, struct extent_map_tree *em
em->block_start = SZ_16K;
em->block_len = SZ_16K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, 0, SZ_8K);
+ ret = btrfs_add_extent_mapping(inode, &em, 0, SZ_8K);
write_unlock(&em_tree->lock);
if (ret != 0) {
@@ -725,7 +739,10 @@ static int test_case_6(struct btrfs_fs_info *fs_info, struct extent_map_tree *em
ret = 0;
out:
free_extent_map(em);
- free_extent_map_tree(em_tree);
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
+
return ret;
}
@@ -734,28 +751,19 @@ out:
* true would mess up the start/end calculations and subsequent splits would be
* incorrect.
*/
-static int test_case_7(struct btrfs_fs_info *fs_info)
+static int test_case_7(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
{
- struct extent_map_tree *em_tree;
+ struct extent_map_tree *em_tree = &inode->extent_tree;
struct extent_map *em;
- struct inode *inode;
int ret;
+ int ret2;
test_msg("Running btrfs_drop_extent_cache with pinned");
- inode = btrfs_new_test_inode();
- if (!inode) {
- test_std_err(TEST_ALLOC_INODE);
- return -ENOMEM;
- }
-
- em_tree = &BTRFS_I(inode)->extent_tree;
-
em = alloc_extent_map();
if (!em) {
test_std_err(TEST_ALLOC_EXTENT_MAP);
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
/* [0, 16K), pinned */
@@ -765,7 +773,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info)
em->block_len = SZ_4K;
em->flags |= EXTENT_FLAG_PINNED;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("couldn't add extent map");
@@ -786,7 +794,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info)
em->block_start = SZ_32K;
em->block_len = SZ_16K;
write_lock(&em_tree->lock);
- ret = btrfs_add_extent_mapping(fs_info, em_tree, &em, em->start, em->len);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
write_unlock(&em_tree->lock);
if (ret < 0) {
test_err("couldn't add extent map");
@@ -798,7 +806,7 @@ static int test_case_7(struct btrfs_fs_info *fs_info)
* Drop [0, 36K). This should skip the [0, 4K) extent and then split the
* [32K, 48K) extent.
*/
- btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (36 * SZ_1K) - 1, true);
+ btrfs_drop_extent_map_range(inode, 0, (36 * SZ_1K) - 1, true);
/* Make sure our extent maps look sane. */
ret = -EINVAL;
@@ -847,6 +855,11 @@ static int test_case_7(struct btrfs_fs_info *fs_info)
goto out;
}
+ if (em->block_start != SZ_32K + SZ_4K) {
+ test_err("em->block_start is %llu, expected 36K", em->block_start);
+ goto out;
+ }
+
free_extent_map(em);
read_lock(&em_tree->lock);
@@ -860,7 +873,14 @@ static int test_case_7(struct btrfs_fs_info *fs_info)
ret = 0;
out:
free_extent_map(em);
- iput(inode);
+ /* Unpin our extent to prevent a warning when removing it below. */
+ ret2 = unpin_extent_cache(inode, 0, SZ_16K, 0);
+ if (ret == 0)
+ ret = ret2;
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
+
return ret;
}
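
Every test now funnels teardown through free_extent_map_tree(), which can itself report a failure (e.g. leaked extent maps), so each test folds the cleanup status into its own return value. A minimal sketch of the idiom, with run_test_body() as a hypothetical stand-in for the assertions above:

	int ret = run_test_body(inode);		/* hypothetical test body */
	int ret2;

	ret2 = free_extent_map_tree(inode);	/* cleanup can fail too */
	if (ret == 0)
		ret = ret2;			/* surface it only if the test passed */
	return ret;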
@@ -954,7 +974,8 @@ out_free:
int btrfs_test_extent_map(void)
{
struct btrfs_fs_info *fs_info = NULL;
- struct extent_map_tree *em_tree;
+ struct inode *inode;
+ struct btrfs_root *root = NULL;
int ret = 0, i;
struct rmap_test_vector rmap_tests[] = {
{
@@ -1003,33 +1024,42 @@ int btrfs_test_extent_map(void)
return -ENOMEM;
}
- em_tree = kzalloc(sizeof(*em_tree), GFP_KERNEL);
- if (!em_tree) {
+ inode = btrfs_new_test_inode();
+ if (!inode) {
+ test_std_err(TEST_ALLOC_INODE);
ret = -ENOMEM;
goto out;
}
- extent_map_tree_init(em_tree);
+ root = btrfs_alloc_dummy_root(fs_info);
+ if (IS_ERR(root)) {
+ test_std_err(TEST_ALLOC_ROOT);
+ ret = PTR_ERR(root);
+ root = NULL;
+ goto out;
+ }
- ret = test_case_1(fs_info, em_tree);
+ BTRFS_I(inode)->root = root;
+
+ ret = test_case_1(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_2(fs_info, em_tree);
+ ret = test_case_2(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_3(fs_info, em_tree);
+ ret = test_case_3(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_4(fs_info, em_tree);
+ ret = test_case_4(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_5(fs_info);
+ ret = test_case_5(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_6(fs_info, em_tree);
+ ret = test_case_6(fs_info, BTRFS_I(inode));
if (ret)
goto out;
- ret = test_case_7(fs_info);
+ ret = test_case_7(fs_info, BTRFS_I(inode));
if (ret)
goto out;
@@ -1041,7 +1071,8 @@ int btrfs_test_extent_map(void)
}
out:
- kfree(em_tree);
+ iput(inode);
+ btrfs_free_dummy_root(root);
btrfs_free_dummy_fs_info(fs_info);
return ret;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 46e8426adf4f..3388c836b9a5 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -426,7 +426,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
return 0;
}
radix_tree_tag_set(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
BTRFS_ROOT_TRANS_TAG);
spin_unlock(&fs_info->fs_roots_radix_lock);
root->last_trans = trans->transid;
@@ -472,7 +472,7 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
/* Make sure we don't try to update the root at commit time */
spin_lock(&fs_info->fs_roots_radix_lock);
radix_tree_tag_clear(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
BTRFS_ROOT_TRANS_TAG);
spin_unlock(&fs_info->fs_roots_radix_lock);
}
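
The root->root_key.objectid dereferences give way to the btrfs_root_id() helper here and throughout the rest of the series. Judging by its uses, it is assumed to be the trivial inline accessor:

	static inline u64 btrfs_root_id(const struct btrfs_root *root)
	{
		return root->root_key.objectid;
	}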
@@ -550,7 +550,7 @@ static inline bool need_reserve_reloc_root(struct btrfs_root *root)
if (!fs_info->reloc_ctl ||
!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
- root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
+ btrfs_root_id(root) == BTRFS_TREE_RELOC_OBJECTID ||
root->reloc_root)
return false;
@@ -745,14 +745,6 @@ again:
h->reloc_reserved = reloc_reserved;
}
- /*
- * Now that we have found a transaction to be a part of, convert the
- * qgroup reservation from prealloc to pertrans. A different transaction
- * can't race in and free our pertrans out from under us.
- */
- if (qgroup_reserved)
- btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
-
got_it:
if (!current->journal_info)
current->journal_info = h;
@@ -786,8 +778,15 @@ got_it:
* not just freed.
*/
btrfs_end_transaction(h);
- return ERR_PTR(ret);
+ goto reserve_fail;
}
+ /*
+ * Now that we have found a transaction to be a part of, convert the
+ * qgroup reservation from prealloc to pertrans. A different transaction
+ * can't race in and free our pertrans out from under us.
+ */
+ if (qgroup_reserved)
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
return h;
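
The qgroup conversion is moved below the last failure point, so the reserve_fail path still owns a prealloc reservation it can release instead of leaking a pertrans one. A sketch of the resulting shape, with a hypothetical acquire_transaction() standing in for the join logic:

	ret = acquire_transaction(fs_info, type, &h);	/* last step that can fail */
	if (ret)
		goto reserve_fail;	/* prealloc reservation still held */

	/* Nothing below can fail, so a pertrans reservation cannot leak. */
	if (qgroup_reserved)
		btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
	return h;

reserve_fail:
	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
	return ERR_PTR(ret);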
@@ -1053,7 +1052,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
- int err = 0;
+ int ret = 0;
if (refcount_read(&trans->use_count) > 1) {
refcount_dec(&trans->use_count);
@@ -1092,13 +1091,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (TRANS_ABORTED(trans) || BTRFS_FS_ERROR(info)) {
wake_up_process(info->transaction_kthread);
if (TRANS_ABORTED(trans))
- err = trans->aborted;
+ ret = trans->aborted;
else
- err = -EROFS;
+ ret = -EROFS;
}
kmem_cache_free(btrfs_trans_handle_cachep, trans);
- return err;
+ return ret;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans)
@@ -1119,8 +1118,7 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans)
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark)
{
- int err = 0;
- int werr = 0;
+ int ret = 0;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
@@ -1130,7 +1128,7 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
mark, &cached_state)) {
bool wait_writeback = false;
- err = convert_extent_bit(dirty_pages, start, end,
+ ret = convert_extent_bit(dirty_pages, start, end,
EXTENT_NEED_WAIT,
mark, &cached_state);
/*
@@ -1146,22 +1144,22 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
* We cleanup any entries left in the io tree when committing
* the transaction (through extent_io_tree_release()).
*/
- if (err == -ENOMEM) {
- err = 0;
+ if (ret == -ENOMEM) {
+ ret = 0;
wait_writeback = true;
}
- if (!err)
- err = filemap_fdatawrite_range(mapping, start, end);
- if (err)
- werr = err;
- else if (wait_writeback)
- werr = filemap_fdatawait_range(mapping, start, end);
+ if (!ret)
+ ret = filemap_fdatawrite_range(mapping, start, end);
+ if (!ret && wait_writeback)
+ ret = filemap_fdatawait_range(mapping, start, end);
free_extent_state(cached_state);
+ if (ret)
+ break;
cached_state = NULL;
cond_resched();
start = end + 1;
}
- return werr;
+ return ret;
}
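
The rewrite drops the old two-variable scheme (err for the latest failure, werr for the sticky one) in favour of a single ret that stops the loop at the first hard error. Reduced to its shape, with hypothetical helpers:

	int ret = 0;

	while (find_next_range(tree, &start, &end)) {	  /* hypothetical iterator */
		ret = process_range(mapping, start, end); /* hypothetical worker */
		if (ret == -ENOMEM)
			ret = 0;	/* tolerated; handled by a later wait */
		if (ret)
			break;		/* first real error wins */
		start = end + 1;
	}
	return ret;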
/*
@@ -1173,12 +1171,11 @@ int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages)
{
- int err = 0;
- int werr = 0;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
+ int ret = 0;
while (find_first_extent_bit(dirty_pages, start, &start, &end,
EXTENT_NEED_WAIT, &cached_state)) {
@@ -1190,22 +1187,20 @@ static int __btrfs_wait_marked_extents(struct btrfs_fs_info *fs_info,
* concurrently - we do it only at transaction commit time when
* it's safe to do it (through extent_io_tree_release()).
*/
- err = clear_extent_bit(dirty_pages, start, end,
+ ret = clear_extent_bit(dirty_pages, start, end,
EXTENT_NEED_WAIT, &cached_state);
- if (err == -ENOMEM)
- err = 0;
- if (!err)
- err = filemap_fdatawait_range(mapping, start, end);
- if (err)
- werr = err;
+ if (ret == -ENOMEM)
+ ret = 0;
+ if (!ret)
+ ret = filemap_fdatawait_range(mapping, start, end);
free_extent_state(cached_state);
+ if (ret)
+ break;
cached_state = NULL;
cond_resched();
start = end + 1;
}
- if (err)
- werr = err;
- return werr;
+ return ret;
}
static int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
@@ -1230,7 +1225,7 @@ int btrfs_wait_tree_log_extents(struct btrfs_root *log_root, int mark)
bool errors = false;
int err;
- ASSERT(log_root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(btrfs_root_id(log_root) == BTRFS_TREE_LOG_OBJECTID);
err = __btrfs_wait_marked_extents(fs_info, dirty_pages);
if ((mark & EXTENT_DIRTY) &&
@@ -1493,8 +1488,9 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
ASSERT(atomic_read(&root->log_commit[1]) == 0);
radix_tree_tag_clear(&fs_info->fs_roots_radix,
- (unsigned long)root->root_key.objectid,
+ (unsigned long)btrfs_root_id(root),
BTRFS_ROOT_TRANS_TAG);
+ btrfs_qgroup_free_meta_all_pertrans(root);
spin_unlock(&fs_info->fs_roots_radix_lock);
btrfs_free_log(trans, root);
@@ -1519,7 +1515,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
if (ret2)
return ret2;
spin_lock(&fs_info->fs_roots_radix_lock);
- btrfs_qgroup_free_meta_all_pertrans(root);
}
}
spin_unlock(&fs_info->fs_roots_radix_lock);
@@ -1584,8 +1579,8 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
goto out;
/* Now qgroup are all updated, we can inherit it to new qgroups */
- ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
- parent->root_key.objectid, inherit);
+ ret = btrfs_qgroup_inherit(trans, btrfs_root_id(src), dst_objectid,
+ btrfs_root_id(parent), inherit);
if (ret < 0)
goto out;
@@ -1823,7 +1818,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* insert root back/forward references
*/
ret = btrfs_add_root_ref(trans, objectid,
- parent_root->root_key.objectid,
+ btrfs_root_id(parent_root),
btrfs_ino(BTRFS_I(parent_inode)), index,
&fname.disk_name);
if (ret) {
@@ -1856,16 +1851,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = qgroup_account_snapshot(trans, root, parent_root,
pending->inherit, objectid);
else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
- ret = btrfs_qgroup_inherit(trans, root->root_key.objectid, objectid,
- parent_root->root_key.objectid, pending->inherit);
+ ret = btrfs_qgroup_inherit(trans, btrfs_root_id(root), objectid,
+ btrfs_root_id(parent_root), pending->inherit);
if (ret < 0)
goto fail;
ret = btrfs_insert_dir_item(trans, &fname.disk_name,
BTRFS_I(parent_inode), &key, BTRFS_FT_DIR,
index);
- /* We have check then name at the beginning, so it is impossible. */
- BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@@ -2626,7 +2619,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
list_del_init(&root->root_list);
spin_unlock(&fs_info->trans_lock);
- btrfs_debug(fs_info, "cleaner removing %llu", root->root_key.objectid);
+ btrfs_debug(fs_info, "cleaner removing %llu", btrfs_root_id(root));
btrfs_kill_all_delayed_nodes(root);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index c8fbcae4e88e..a2c3651a3d8f 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1797,6 +1797,11 @@ enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf)
return BTRFS_TREE_BLOCK_INVALID_LEVEL;
}
+ if (unlikely(!btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_WRITTEN))) {
+ generic_err(leaf, 0, "invalid flag for leaf, WRITTEN not set");
+ return BTRFS_TREE_BLOCK_WRITTEN_NOT_SET;
+ }
+
/*
* Extent buffers from a relocation tree have a owner field that
* corresponds to the subvolume tree they are based on. So just from an
@@ -1858,6 +1863,7 @@ enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf)
for (slot = 0; slot < nritems; slot++) {
u32 item_end_expected;
u64 item_data_end;
+ enum btrfs_tree_block_status ret;
btrfs_item_key_to_cpu(leaf, &key, slot);
@@ -1913,21 +1919,10 @@ enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf)
return BTRFS_TREE_BLOCK_INVALID_OFFSETS;
}
- /*
- * We only want to do this if WRITTEN is set, otherwise the leaf
- * may be in some intermediate state and won't appear valid.
- */
- if (btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_WRITTEN)) {
- enum btrfs_tree_block_status ret;
-
- /*
- * Check if the item size and content meet other
- * criteria
- */
- ret = check_leaf_item(leaf, &key, slot, &prev_key);
- if (unlikely(ret != BTRFS_TREE_BLOCK_CLEAN))
- return ret;
- }
+ /* Check if the item size and content meet other criteria. */
+ ret = check_leaf_item(leaf, &key, slot, &prev_key);
+ if (unlikely(ret != BTRFS_TREE_BLOCK_CLEAN))
+ return ret;
prev_key.objectid = key.objectid;
prev_key.type = key.type;
@@ -1957,6 +1952,11 @@ enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node)
int level = btrfs_header_level(node);
u64 bytenr;
+ if (unlikely(!btrfs_header_flag(node, BTRFS_HEADER_FLAG_WRITTEN))) {
+ generic_err(node, 0, "invalid flag for node, WRITTEN not set");
+ return BTRFS_TREE_BLOCK_WRITTEN_NOT_SET;
+ }
+
if (unlikely(level <= 0 || level >= BTRFS_MAX_LEVEL)) {
generic_err(node, 0,
"invalid level for node, have %d expect [1, %d]",
@@ -2021,7 +2021,7 @@ int btrfs_check_eb_owner(const struct extent_buffer *eb, u64 root_owner)
* Skip dummy fs, as selftests don't create unique ebs for each dummy
* root.
*/
- if (test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &eb->fs_info->fs_state))
+ if (btrfs_is_testing(eb->fs_info))
return 0;
/*
* There are several call sites (backref walking, qgroup, and data
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
index 5c809b50b2d0..01669cfa6578 100644
--- a/fs/btrfs/tree-checker.h
+++ b/fs/btrfs/tree-checker.h
@@ -53,6 +53,7 @@ enum btrfs_tree_block_status {
BTRFS_TREE_BLOCK_INVALID_BLOCKPTR,
BTRFS_TREE_BLOCK_INVALID_ITEM,
BTRFS_TREE_BLOCK_INVALID_OWNER,
+ BTRFS_TREE_BLOCK_WRITTEN_NOT_SET,
};
/*
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 472918a5bc73..5146387b416b 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -391,7 +391,7 @@ static int overwrite_item(struct btrfs_trans_handle *trans,
* the leaf before writing into the log tree. See the comments at
* copy_items() for more details.
*/
- ASSERT(root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
+ ASSERT(btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID);
item_size = btrfs_item_size(eb, slot);
src_ptr = btrfs_item_ptr_offset(eb, slot);
@@ -748,7 +748,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
goto out;
if (ins.objectid > 0) {
- struct btrfs_ref ref = { 0 };
u64 csum_start;
u64 csum_end;
LIST_HEAD(ordered_sums);
@@ -762,13 +761,15 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
if (ret < 0) {
goto out;
} else if (ret == 0) {
- btrfs_init_generic_ref(&ref,
- BTRFS_ADD_DELAYED_REF,
- ins.objectid, ins.offset, 0,
- root->root_key.objectid);
- btrfs_init_data_ref(&ref,
- root->root_key.objectid,
- key->objectid, offset, 0, false);
+ struct btrfs_ref ref = {
+ .action = BTRFS_ADD_DELAYED_REF,
+ .bytenr = ins.objectid,
+ .num_bytes = ins.offset,
+ .owning_root = btrfs_root_id(root),
+ .ref_root = btrfs_root_id(root),
+ };
+ btrfs_init_data_ref(&ref, key->objectid, offset,
+ 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
if (ret)
goto out;
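
The btrfs_init_generic_ref()/btrfs_init_data_ref() pair is partly replaced by a designated initializer: members named in the braces are set explicitly, and every other member of struct btrfs_ref is zero-initialized by the compiler, which is exactly what the old `= { 0 }` plus init helpers achieved:

	struct btrfs_ref ref = {
		.action      = BTRFS_ADD_DELAYED_REF,
		.bytenr      = ins.objectid,
		.num_bytes   = ins.offset,
		.owning_root = btrfs_root_id(root),
		.ref_root    = btrfs_root_id(root),
	};	/* all unnamed members start out as zero */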
@@ -778,7 +779,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* allocation tree
*/
ret = btrfs_alloc_logged_file_extent(trans,
- root->root_key.objectid,
+ btrfs_root_id(root),
key->objectid, offset, &ins);
if (ret)
goto out;
@@ -797,9 +798,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
ret = btrfs_lookup_csums_list(root->log_root,
csum_start, csum_end - 1,
- &ordered_sums, 0, false);
- if (ret)
+ &ordered_sums, false);
+ if (ret < 0)
goto out;
+ ret = 0;
/*
* Now delete all existing csums in the csum root that
* cover our range. We do this because we can have an
@@ -3045,7 +3047,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
if (ret != -ENOSPC)
btrfs_err(fs_info,
"failed to update log for root %llu ret %d",
- root->root_key.objectid, ret);
+ btrfs_root_id(root), ret);
btrfs_wait_tree_log_extents(log, mark);
mutex_unlock(&log_root_tree->log_mutex);
goto out;
@@ -4460,9 +4462,10 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
disk_bytenr += extent_offset;
ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
disk_bytenr + extent_num_bytes - 1,
- &ordered_sums, 0, false);
- if (ret)
+ &ordered_sums, false);
+ if (ret < 0)
goto out;
+ ret = 0;
list_for_each_entry_safe(sums, sums_next, &ordered_sums, list) {
if (!ret)
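
All btrfs_lookup_csums_list() callers switch from `if (ret)` to `if (ret < 0)` and then reset ret, which suggests the function now returns a non-negative value (presumably a count) on success. The adjusted calling pattern:

	ret = btrfs_lookup_csums_list(csum_root, csum_start, csum_end - 1,
				      &ordered_sums, false);
	if (ret < 0)		/* only negative values are errors now */
		goto out;
	ret = 0;		/* discard the positive success value */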
@@ -4574,8 +4577,8 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *csum_root;
u64 csum_offset;
u64 csum_len;
- u64 mod_start = em->mod_start;
- u64 mod_len = em->mod_len;
+ u64 mod_start = em->start;
+ u64 mod_len = em->len;
LIST_HEAD(ordered_sums);
int ret = 0;
@@ -4655,9 +4658,10 @@ static int log_extent_csums(struct btrfs_trans_handle *trans,
csum_root = btrfs_csum_root(trans->fs_info, em->block_start);
ret = btrfs_lookup_csums_list(csum_root, em->block_start + csum_offset,
em->block_start + csum_offset +
- csum_len - 1, &ordered_sums, 0, false);
- if (ret)
+ csum_len - 1, &ordered_sums, false);
+ if (ret < 0)
return ret;
+ ret = 0;
while (!list_empty(&ordered_sums)) {
struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
@@ -4945,7 +4949,7 @@ process:
* private list.
*/
if (ret) {
- clear_em_logging(tree, em);
+ clear_em_logging(inode, em);
free_extent_map(em);
continue;
}
@@ -4954,7 +4958,7 @@ process:
ret = log_one_extent(trans, inode, em, path, ctx);
write_lock(&tree->lock);
- clear_em_logging(tree, em);
+ clear_em_logging(inode, em);
free_extent_map(em);
}
WARN_ON(!list_empty(&extents));
diff --git a/fs/btrfs/tree-mod-log.c b/fs/btrfs/tree-mod-log.c
index 43b3accbed7a..fa45b5fb9683 100644
--- a/fs/btrfs/tree-mod-log.c
+++ b/fs/btrfs/tree-mod-log.c
@@ -1004,7 +1004,7 @@ struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq)
free_extent_buffer(eb_root);
check.level = level;
- check.owner_root = root->root_key.objectid;
+ check.owner_root = btrfs_root_id(root);
old = read_tree_block(fs_info, logical, &check);
if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1dc1f1946ae0..b6a701011fb0 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -692,6 +692,16 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
device->bdev = file_bdev(bdev_file);
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ if (device->devt != device->bdev->bd_dev) {
+ btrfs_warn(NULL,
+ "device %s maj:min changed from %d:%d to %d:%d",
+ device->name->str, MAJOR(device->devt),
+ MINOR(device->devt), MAJOR(device->bdev->bd_dev),
+ MINOR(device->bdev->bd_dev));
+
+ device->devt = device->bdev->bd_dev;
+ }
+
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -1174,23 +1184,30 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
struct btrfs_device *tmp_device;
+ int ret = 0;
list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
dev_list) {
- int ret;
+ int ret2;
- ret = btrfs_open_one_device(fs_devices, device, flags, holder);
- if (ret == 0 &&
+ ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);
+ if (ret2 == 0 &&
(!latest_dev || device->generation > latest_dev->generation)) {
latest_dev = device;
- } else if (ret == -ENODATA) {
+ } else if (ret2 == -ENODATA) {
fs_devices->num_devices--;
list_del(&device->dev_list);
btrfs_free_device(device);
}
+ if (ret == 0 && ret2 != 0)
+ ret = ret2;
}
- if (fs_devices->open_devices == 0)
+
+ if (fs_devices->open_devices == 0) {
+ if (ret)
+ return ret;
return -EINVAL;
+ }
fs_devices->opened = 1;
fs_devices->latest_dev = latest_dev;
@@ -3438,6 +3455,7 @@ again:
* alignment and size).
*/
ret = -EUCLEAN;
+ mutex_unlock(&fs_info->reclaim_bgs_lock);
goto error;
}
@@ -5597,21 +5615,6 @@ struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp)
return map;
}
-struct btrfs_chunk_map *btrfs_clone_chunk_map(struct btrfs_chunk_map *map, gfp_t gfp)
-{
- const int size = btrfs_chunk_map_size(map->num_stripes);
- struct btrfs_chunk_map *clone;
-
- clone = kmemdup(map, size, gfp);
- if (!clone)
- return NULL;
-
- refcount_set(&clone->refs, 1);
- RB_CLEAR_NODE(&clone->rb_node);
-
- return clone;
-}
-
static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
struct alloc_chunk_ctl *ctl,
struct btrfs_device_info *devices_info)
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 93854609a4d5..66e6fc481ecd 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -92,6 +92,9 @@ enum btrfs_raid_types {
#define BTRFS_DEV_STATE_FLUSH_SENT (4)
#define BTRFS_DEV_STATE_NO_READA (5)
+/* Special value encoding failure to write primary super block. */
+#define BTRFS_SUPER_PRIMARY_WRITE_ERROR (INT_MAX / 2)
+
struct btrfs_fs_devices;
struct btrfs_device {
@@ -142,6 +145,12 @@ struct btrfs_device {
/* type and info about this device */
u64 type;
+ /*
+ * Counter of super block write errors; values larger than
+ * BTRFS_SUPER_PRIMARY_WRITE_ERROR encode a primary super block write failure.
+ */
+ atomic_t sb_write_errors;
+
/* minimal io size for this device */
u32 sector_size;
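
The counter doubles as a flag: real write-error counts stay far below INT_MAX / 2, so reaching the sentinel can only mean the primary copy failed. A hypothetical sketch of one plausible encoding (the actual call sites are outside this hunk):

	/* Writer side: copy 0 is the primary super block. */
	if (ret && copy_num == 0)
		atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR, &device->sb_write_errors);
	else if (ret)
		atomic_inc(&device->sb_write_errors);

	/* Reader side. */
	bool primary_failed = atomic_read(&device->sb_write_errors) >=
			      BTRFS_SUPER_PRIMARY_WRITE_ERROR;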
@@ -743,7 +752,6 @@ struct btrfs_chunk_map *btrfs_alloc_chunk_map(int num_stripes, gfp_t gfp);
int btrfs_add_chunk_map(struct btrfs_fs_info *fs_info, struct btrfs_chunk_map *map);
#endif
-struct btrfs_chunk_map *btrfs_clone_chunk_map(struct btrfs_chunk_map *map, gfp_t gfp);
struct btrfs_chunk_map *btrfs_find_chunk_map(struct btrfs_fs_info *fs_info,
u64 logical, u64 length);
struct btrfs_chunk_map *btrfs_find_chunk_map_nolock(struct btrfs_fs_info *fs_info,
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 6287763fdccc..15d0999e340e 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -504,7 +504,7 @@ static int btrfs_initxattrs(struct inode *inode,
const struct xattr *xattr;
unsigned int nofs_flag;
char *name;
- int err = 0;
+ int ret = 0;
/*
* We're holding a transaction handle, so use a NOFS memory allocation
@@ -515,7 +515,7 @@ static int btrfs_initxattrs(struct inode *inode,
name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
strlen(xattr->name) + 1, GFP_KERNEL);
if (!name) {
- err = -ENOMEM;
+ ret = -ENOMEM;
break;
}
strcpy(name, XATTR_SECURITY_PREFIX);
@@ -524,14 +524,14 @@ static int btrfs_initxattrs(struct inode *inode,
if (strcmp(name, XATTR_NAME_CAPS) == 0)
clear_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);
- err = btrfs_setxattr(trans, inode, name, xattr->value,
+ ret = btrfs_setxattr(trans, inode, name, xattr->value,
xattr->value_len, 0);
kfree(name);
- if (err < 0)
+ if (ret < 0)
break;
}
memalloc_nofs_restore(nofs_flag);
- return err;
+ return ret;
}
int btrfs_xattr_security_init(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index e5b3f2003896..d9e5c88a0f85 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -91,24 +91,24 @@ fail:
return ERR_PTR(-ENOMEM);
}
-int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
- unsigned long *total_in, unsigned long *total_out)
+int zlib_compress_folios(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret;
char *data_in = NULL;
- char *cpage_out;
- int nr_pages = 0;
- struct page *in_page = NULL;
- struct page *out_page = NULL;
+ char *cfolio_out;
+ int nr_folios = 0;
+ struct folio *in_folio = NULL;
+ struct folio *out_folio = NULL;
unsigned long bytes_left;
- unsigned int in_buf_pages;
+ unsigned int in_buf_folios;
unsigned long len = *total_out;
- unsigned long nr_dest_pages = *out_pages;
- const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
+ unsigned long nr_dest_folios = *out_folios;
+ const unsigned long max_out = nr_dest_folios * PAGE_SIZE;
- *out_pages = 0;
+ *out_folios = 0;
*total_out = 0;
*total_in = 0;
@@ -121,18 +121,18 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
workspace->strm.total_in = 0;
workspace->strm.total_out = 0;
- out_page = btrfs_alloc_compr_page();
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio();
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- cpage_out = page_address(out_page);
- pages[0] = out_page;
- nr_pages = 1;
+ cfolio_out = folio_address(out_folio);
+ folios[0] = out_folio;
+ nr_folios = 1;
workspace->strm.next_in = workspace->buf;
workspace->strm.avail_in = 0;
- workspace->strm.next_out = cpage_out;
+ workspace->strm.next_out = cfolio_out;
workspace->strm.avail_out = PAGE_SIZE;
while (workspace->strm.total_in < len) {
@@ -142,19 +142,22 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
*/
if (workspace->strm.avail_in == 0) {
bytes_left = len - workspace->strm.total_in;
- in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
- workspace->buf_size / PAGE_SIZE);
- if (in_buf_pages > 1) {
+ in_buf_folios = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
+ workspace->buf_size / PAGE_SIZE);
+ if (in_buf_folios > 1) {
int i;
- for (i = 0; i < in_buf_pages; i++) {
+ for (i = 0; i < in_buf_folios; i++) {
if (data_in) {
kunmap_local(data_in);
- put_page(in_page);
+ folio_put(in_folio);
+ data_in = NULL;
}
- in_page = find_get_page(mapping,
- start >> PAGE_SHIFT);
- data_in = kmap_local_page(in_page);
+ ret = btrfs_compress_filemap_get_folio(mapping,
+ start, &in_folio);
+ if (ret < 0)
+ goto out;
+ data_in = kmap_local_folio(in_folio, 0);
copy_page(workspace->buf + i * PAGE_SIZE,
data_in);
start += PAGE_SIZE;
@@ -163,11 +166,14 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
} else {
if (data_in) {
kunmap_local(data_in);
- put_page(in_page);
+ folio_put(in_folio);
+ data_in = NULL;
}
- in_page = find_get_page(mapping,
- start >> PAGE_SHIFT);
- data_in = kmap_local_page(in_page);
+ ret = btrfs_compress_filemap_get_folio(mapping,
+ start, &in_folio);
+ if (ret < 0)
+ goto out;
+ data_in = kmap_local_folio(in_folio, 0);
start += PAGE_SIZE;
workspace->strm.next_in = data_in;
}
@@ -196,20 +202,20 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
* the stream end if required
*/
if (workspace->strm.avail_out == 0) {
- if (nr_pages == nr_dest_pages) {
+ if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_page = btrfs_alloc_compr_page();
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio();
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- cpage_out = page_address(out_page);
- pages[nr_pages] = out_page;
- nr_pages++;
+ cfolio_out = folio_address(out_folio);
+ folios[nr_folios] = out_folio;
+ nr_folios++;
workspace->strm.avail_out = PAGE_SIZE;
- workspace->strm.next_out = cpage_out;
+ workspace->strm.next_out = cfolio_out;
}
/* we're all done */
if (workspace->strm.total_in >= len)
@@ -231,21 +237,21 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
ret = -EIO;
goto out;
} else if (workspace->strm.avail_out == 0) {
- /* get another page for the stream end */
- if (nr_pages == nr_dest_pages) {
+ /* Get another folio for the stream end. */
+ if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_page = btrfs_alloc_compr_page();
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio();
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- cpage_out = page_address(out_page);
- pages[nr_pages] = out_page;
- nr_pages++;
+ cfolio_out = folio_address(out_folio);
+ folios[nr_folios] = out_folio;
+ nr_folios++;
workspace->strm.avail_out = PAGE_SIZE;
- workspace->strm.next_out = cpage_out;
+ workspace->strm.next_out = cfolio_out;
}
}
zlib_deflateEnd(&workspace->strm);
@@ -259,10 +265,10 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = workspace->strm.total_out;
*total_in = workspace->strm.total_in;
out:
- *out_pages = nr_pages;
+ *out_folios = nr_folios;
if (data_in) {
kunmap_local(data_in);
- put_page(in_page);
+ folio_put(in_folio);
}
return ret;
@@ -275,13 +281,13 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
int wbits = MAX_WBITS;
char *data_in;
size_t total_out = 0;
- unsigned long page_in_index = 0;
+ unsigned long folio_in_index = 0;
size_t srclen = cb->compressed_len;
- unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+ unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
- struct page **pages_in = cb->compressed_pages;
+ struct folio **folios_in = cb->compressed_folios;
- data_in = kmap_local_page(pages_in[page_in_index]);
+ data_in = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->strm.next_in = data_in;
workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
workspace->strm.total_in = 0;
@@ -331,12 +337,12 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
if (workspace->strm.avail_in == 0) {
unsigned long tmp;
kunmap_local(data_in);
- page_in_index++;
- if (page_in_index >= total_pages_in) {
+ folio_in_index++;
+ if (folio_in_index >= total_folios_in) {
data_in = NULL;
break;
}
- data_in = kmap_local_page(pages_in[page_in_index]);
+ data_in = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->strm.next_in = data_in;
tmp = srclen - workspace->strm.total_in;
workspace->strm.avail_in = min(tmp, PAGE_SIZE);
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 5a3d5ec75c5a..4cba80b34387 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1574,11 +1574,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
if (!map)
return -EINVAL;
- cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS);
- if (!cache->physical_map) {
- ret = -ENOMEM;
- goto out;
- }
+ cache->physical_map = map;
zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
if (!zone_info) {
@@ -1690,7 +1686,6 @@ out:
}
bitmap_free(active);
kfree(zone_info);
- btrfs_free_chunk_map(map);
return ret;
}
@@ -2175,6 +2170,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
struct btrfs_chunk_map *map;
const bool is_metadata = (block_group->flags &
(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int ret = 0;
int i;
@@ -2250,6 +2246,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
btrfs_clear_data_reloc_bg(block_group);
spin_unlock(&block_group->lock);
+ down_read(&dev_replace->rwsem);
map = block_group->physical_map;
for (i = 0; i < map->num_stripes; i++) {
struct btrfs_device *device = map->stripes[i].dev;
@@ -2266,13 +2263,16 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
zinfo->zone_size >> SECTOR_SHIFT);
memalloc_nofs_restore(nofs_flags);
- if (ret)
+ if (ret) {
+ up_read(&dev_replace->rwsem);
return ret;
+ }
if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
zinfo->reserved_active_zones++;
btrfs_dev_clear_active_zone(device, physical);
}
+ up_read(&dev_replace->rwsem);
if (!fully_written)
btrfs_dec_block_group_ro(block_group);
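
The stripe walk now runs under dev_replace->rwsem, so the error return inside the loop has to drop the lock before bailing out. The locking discipline, reduced to a sketch with a hypothetical per-stripe helper:

	down_read(&dev_replace->rwsem);
	for (i = 0; i < map->num_stripes; i++) {
		ret = finish_one_stripe(map->stripes[i].dev);	/* hypothetical */
		if (ret) {
			up_read(&dev_replace->rwsem);	/* unlock on every exit path */
			return ret;
		}
	}
	up_read(&dev_replace->rwsem);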
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 92b3744b819b..2b232b82c3a8 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -374,25 +374,25 @@ fail:
return ERR_PTR(-ENOMEM);
}
-int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
- u64 start, struct page **pages, unsigned long *out_pages,
- unsigned long *total_in, unsigned long *total_out)
+int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
+ u64 start, struct folio **folios, unsigned long *out_folios,
+ unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
zstd_cstream *stream;
int ret = 0;
- int nr_pages = 0;
- struct page *in_page = NULL; /* The current page to read */
- struct page *out_page = NULL; /* The current page to write to */
+ int nr_folios = 0;
+ struct folio *in_folio = NULL; /* The current folio to read. */
+ struct folio *out_folio = NULL; /* The current folio to write to. */
unsigned long tot_in = 0;
unsigned long tot_out = 0;
unsigned long len = *total_out;
- const unsigned long nr_dest_pages = *out_pages;
- unsigned long max_out = nr_dest_pages * PAGE_SIZE;
+ const unsigned long nr_dest_folios = *out_folios;
+ unsigned long max_out = nr_dest_folios * PAGE_SIZE;
zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
len);
- *out_pages = 0;
+ *out_folios = 0;
*total_out = 0;
*total_in = 0;
@@ -406,19 +406,21 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
}
/* map in the first page of input data */
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- workspace->in_buf.src = kmap_local_page(in_page);
+ ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
+ if (ret < 0)
+ goto out;
+ workspace->in_buf.src = kmap_local_folio(in_folio, 0);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
/* Allocate and map in the output buffer */
- out_page = btrfs_alloc_compr_page();
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio();
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- pages[nr_pages++] = out_page;
- workspace->out_buf.dst = page_address(out_page);
+ folios[nr_folios++] = out_folio;
+ workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
@@ -453,17 +455,17 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
if (workspace->out_buf.pos == workspace->out_buf.size) {
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
- if (nr_pages == nr_dest_pages) {
+ if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_page = btrfs_alloc_compr_page();
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio();
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- pages[nr_pages++] = out_page;
- workspace->out_buf.dst = page_address(out_page);
+ folios[nr_folios++] = out_folio;
+ workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out,
PAGE_SIZE);
@@ -479,11 +481,14 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
if (workspace->in_buf.pos == workspace->in_buf.size) {
tot_in += PAGE_SIZE;
kunmap_local(workspace->in_buf.src);
- put_page(in_page);
+ workspace->in_buf.src = NULL;
+ folio_put(in_folio);
start += PAGE_SIZE;
len -= PAGE_SIZE;
- in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- workspace->in_buf.src = kmap_local_page(in_page);
+ ret = btrfs_compress_filemap_get_folio(mapping, start, &in_folio);
+ if (ret < 0)
+ goto out;
+ workspace->in_buf.src = kmap_local_folio(in_folio, 0);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
}
@@ -510,17 +515,17 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
- if (nr_pages == nr_dest_pages) {
+ if (nr_folios == nr_dest_folios) {
ret = -E2BIG;
goto out;
}
- out_page = btrfs_alloc_compr_page();
- if (out_page == NULL) {
+ out_folio = btrfs_alloc_compr_folio();
+ if (out_folio == NULL) {
ret = -ENOMEM;
goto out;
}
- pages[nr_pages++] = out_page;
- workspace->out_buf.dst = page_address(out_page);
+ folios[nr_folios++] = out_folio;
+ workspace->out_buf.dst = folio_address(out_folio);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
}
@@ -534,10 +539,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_in = tot_in;
*total_out = tot_out;
out:
- *out_pages = nr_pages;
+ *out_folios = nr_folios;
if (workspace->in_buf.src) {
kunmap_local(workspace->in_buf.src);
- put_page(in_page);
+ folio_put(in_folio);
}
return ret;
}
@@ -545,12 +550,12 @@ out:
int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- struct page **pages_in = cb->compressed_pages;
+ struct folio **folios_in = cb->compressed_folios;
size_t srclen = cb->compressed_len;
zstd_dstream *stream;
int ret = 0;
- unsigned long page_in_index = 0;
- unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+ unsigned long folio_in_index = 0;
+ unsigned long total_folios_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long total_out = 0;
@@ -562,7 +567,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
goto done;
}
- workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
+ workspace->in_buf.src = kmap_local_folio(folios_in[folio_in_index], 0);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
@@ -599,14 +604,15 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
if (workspace->in_buf.pos == workspace->in_buf.size) {
kunmap_local(workspace->in_buf.src);
- page_in_index++;
- if (page_in_index >= total_pages_in) {
+ folio_in_index++;
+ if (folio_in_index >= total_folios_in) {
workspace->in_buf.src = NULL;
ret = -EIO;
goto done;
}
srclen -= PAGE_SIZE;
- workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
+ workspace->in_buf.src =
+ kmap_local_folio(folios_in[folio_in_index], 0);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
}
diff --git a/fs/buffer.c b/fs/buffer.c
index 4f73d23c2c46..ed698caa8834 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -687,30 +687,37 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
-/*
- * Add a page to the dirty page list.
- *
- * It is a sad fact of life that this function is called from several places
- * deeply under spinlocking. It may not sleep.
- *
- * If the page has buffers, the uptodate buffers are set dirty, to preserve
- * dirty-state coherency between the page and the buffers. It the page does
- * not have buffers then when they are later attached they will all be set
- * dirty.
- *
- * The buffers are dirtied before the page is dirtied. There's a small race
- * window in which a writepage caller may see the page cleanness but not the
- * buffer dirtiness. That's fine. If this code were to set the page dirty
- * before the buffers, a concurrent writepage caller could clear the page dirty
- * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
- * page on the dirty page list.
- *
- * We use i_private_lock to lock against try_to_free_buffers while using the
- * page's buffer list. Also use this to protect against clean buffers being
- * added to the page after it was set dirty.
- *
- * FIXME: may need to call ->reservepage here as well. That's rather up to the
- * address_space though.
+/**
+ * block_dirty_folio - Mark a folio as dirty.
+ * @mapping: The address space containing this folio.
+ * @folio: The folio to mark dirty.
+ *
+ * Filesystems which use buffer_heads can use this function as their
+ * ->dirty_folio implementation. Some filesystems need to do a little
+ * work before calling this function. Filesystems which do not use
+ * buffer_heads should call filemap_dirty_folio() instead.
+ *
+ * If the folio has buffers, the uptodate buffers are set dirty, to
+ * preserve dirty-state coherency between the folio and the buffers.
+ * Buffers added to a dirty folio are created dirty.
+ *
+ * The buffers are dirtied before the folio is dirtied. There's a small
+ * race window in which writeback may see the folio cleanness but not the
+ * buffer dirtiness. That's fine. If this code were to set the folio
+ * dirty before the buffers, writeback could clear the folio dirty flag,
+ * see a bunch of clean buffers and we'd end up with dirty buffers/clean
+ * folio on the dirty folio list.
+ *
+ * We use i_private_lock to lock against try_to_free_buffers() while
+ * using the folio's buffer list. This also prevents clean buffers
+ * being added to the folio after it was set dirty.
+ *
+ * Context: May only be called from process context. Does not sleep.
+ * Caller must ensure that @folio cannot be truncated during this call,
+ * typically by holding the folio lock or having a page in the folio
+ * mapped and holding the page table lock.
+ *
+ * Return: True if the folio was dirtied; false if it was already dirtied.
*/
bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
{
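
Wiring it up looks like this in a hypothetical buffer_head based filesystem (block_invalidate_folio() is the matching helper from this file):

	static const struct address_space_operations myfs_aops = {
		.dirty_folio		= block_dirty_folio,
		.invalidate_folio	= block_invalidate_folio,
		/* plus the filesystem's own read/write methods */
	};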
@@ -1219,26 +1226,28 @@ void mark_buffer_write_io_error(struct buffer_head *bh)
}
EXPORT_SYMBOL(mark_buffer_write_io_error);
-/*
- * Decrement a buffer_head's reference count. If all buffers against a page
- * have zero reference count, are clean and unlocked, and if the page is clean
- * and unlocked then try_to_free_buffers() may strip the buffers from the page
- * in preparation for freeing it (sometimes, rarely, buffers are removed from
- * a page but it ends up not being freed, and buffers may later be reattached).
+/**
+ * __brelse - Release a buffer.
+ * @bh: The buffer to release.
+ *
+ * This variant of brelse() can be called if @bh is guaranteed to not be NULL.
*/
-void __brelse(struct buffer_head * buf)
+void __brelse(struct buffer_head *bh)
{
- if (atomic_read(&buf->b_count)) {
- put_bh(buf);
+ if (atomic_read(&bh->b_count)) {
+ put_bh(bh);
return;
}
WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);
-/*
- * bforget() is like brelse(), except it discards any
- * potentially dirty data.
+/**
+ * __bforget - Discard any dirty data in a buffer.
+ * @bh: The buffer to forget.
+ *
+ * This variant of bforget() can be called if @bh is guaranteed to not
+ * be NULL.
*/
void __bforget(struct buffer_head *bh)
{
@@ -1415,6 +1424,11 @@ EXPORT_SYMBOL(__find_get_block);
* @size: The size of buffer_heads for this @bdev.
* @gfp: The memory allocation flags to use.
*
+ * The returned buffer head has its reference count incremented, but is
+ * not locked. The caller should call brelse() when it has finished
+ * with the buffer. The buffer may not be uptodate. If needed, the
+ * caller can bring it uptodate either by reading it or overwriting it.
+ *
* Return: The buffer head, or NULL if memory could not be allocated.
*/
struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
@@ -1446,20 +1460,29 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
EXPORT_SYMBOL(__breadahead);
/**
- * __bread_gfp() - reads a specified block and returns the bh
- * @bdev: the block_device to read from
- * @block: number of block
- * @size: size (in bytes) to read
- * @gfp: page allocation flag
- *
- * Reads a specified block, and returns buffer head that contains it.
- * The page cache can be allocated from non-movable area
- * not to prevent page migration if you set gfp to zero.
- * It returns NULL if the block was unreadable.
+ * __bread_gfp() - Read a block.
+ * @bdev: The block device to read from.
+ * @block: Block number in units of block size.
+ * @size: The block size of this device in bytes.
+ * @gfp: Not page allocation flags; see below.
+ *
+ * You are not expected to call this function. You should use one of
+ * sb_bread(), sb_bread_unmovable() or __bread().
+ *
+ * Read a specified block, and return the buffer head that refers to it.
+ * If @gfp is 0, the memory will be allocated using the block device's
+ * default GFP flags. If @gfp is __GFP_MOVABLE, the memory may be
+ * allocated from a movable area. Do not pass in a complete set of
+ * GFP flags.
+ *
+ * The returned buffer head has its refcount increased. The caller should
+ * call brelse() when it has finished with the buffer.
+ *
+ * Context: May sleep waiting for I/O.
+ * Return: NULL if the block was unreadable.
*/
-struct buffer_head *
-__bread_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp)
+struct buffer_head *__bread_gfp(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp)
{
struct buffer_head *bh;
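
A usage example matching the new kernel-doc (hypothetical caller; note the 0 rather than a full GFP mask):

	struct buffer_head *bh;

	/* Read block 0 of a 4KiB-block device with the bdev's default flags. */
	bh = __bread_gfp(bdev, 0, 4096, 0);
	if (!bh)
		return -EIO;
	/* bh->b_data holds the block contents */
	brelse(bh);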
@@ -2861,26 +2884,6 @@ int sync_dirty_buffer(struct buffer_head *bh)
}
EXPORT_SYMBOL(sync_dirty_buffer);
-/*
- * try_to_free_buffers() checks if all the buffers on this particular folio
- * are unused, and releases them if so.
- *
- * Exclusion against try_to_free_buffers may be obtained by either
- * locking the folio or by holding its mapping's i_private_lock.
- *
- * If the folio is dirty but all the buffers are clean then we need to
- * be sure to mark the folio clean as well. This is because the folio
- * may be against a block device, and a later reattachment of buffers
- * to a dirty folio will set *all* buffers dirty. Which would corrupt
- * filesystem data on the same device.
- *
- * The same applies to regular filesystem folios: if all the buffers are
- * clean then we set the folio clean and proceed. To do that, we require
- * total exclusion from block_dirty_folio(). That is obtained with
- * i_private_lock.
- *
- * try_to_free_buffers() is non-blocking.
- */
static inline int buffer_busy(struct buffer_head *bh)
{
return atomic_read(&bh->b_count) |
@@ -2914,6 +2917,30 @@ failed:
return false;
}
+/**
+ * try_to_free_buffers - Release buffers attached to this folio.
+ * @folio: The folio.
+ *
+ * If any buffers are in use (dirty, under writeback, elevated refcount),
+ * no buffers will be freed.
+ *
+ * If the folio is dirty but all the buffers are clean then we need to
+ * be sure to mark the folio clean as well. This is because the folio
+ * may be against a block device, and a later reattachment of buffers
+ * to a dirty folio will set *all* buffers dirty. Which would corrupt
+ * filesystem data on the same device.
+ *
+ * The same applies to regular filesystem folios: if all the buffers are
+ * clean then we set the folio clean and proceed. To do that, we require
+ * total exclusion from block_dirty_folio(). That is obtained with
+ * i_private_lock.
+ *
+ * Exclusion against try_to_free_buffers may be obtained by either
+ * locking the folio or by holding its mapping's i_private_lock.
+ *
+ * Context: Process context. @folio must be locked. Will not sleep.
+ * Return: true if all buffers attached to this folio were freed.
+ */
bool try_to_free_buffers(struct folio *folio)
{
struct address_space * const mapping = folio->mapping;
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index 1d685357e67f..e667dbcd20e8 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/uio.h>
+#include <linux/bio.h>
#include <linux/falloc.h>
#include <linux/sched/mm.h>
#include <trace/events/fscache.h>
@@ -493,7 +494,7 @@ out_no_object:
* boundary as appropriate.
*/
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
- loff_t i_size)
+ unsigned long long i_size)
{
return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
subreq->start, &subreq->len, i_size,
@@ -622,6 +623,77 @@ static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
return ret;
}
+static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *wreq = subreq->rreq;
+ struct netfs_cache_resources *cres = &wreq->cache_resources;
+
+ _enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);
+
+ subreq->max_len = ULONG_MAX;
+ subreq->max_nr_segs = BIO_MAX_VECS;
+
+ if (!cachefiles_cres_file(cres)) {
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
+ return netfs_prepare_write_failed(subreq);
+ if (!cachefiles_cres_file(cres))
+ return netfs_prepare_write_failed(subreq);
+ }
+}
+
+static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *wreq = subreq->rreq;
+ struct netfs_cache_resources *cres = &wreq->cache_resources;
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+ struct cachefiles_cache *cache = object->volume->cache;
+ const struct cred *saved_cred;
+ size_t off, pre, post, len = subreq->len;
+ loff_t start = subreq->start;
+ int ret;
+
+ _enter("W=%x[%x] %llx-%llx",
+ wreq->debug_id, subreq->debug_index, start, start + len - 1);
+
+ /* We need to start on the cache granularity boundary */
+ off = start & (CACHEFILES_DIO_BLOCK_SIZE - 1);
+ if (off) {
+ pre = CACHEFILES_DIO_BLOCK_SIZE - off;
+ if (pre >= len) {
+ netfs_write_subrequest_terminated(subreq, len, false);
+ return;
+ }
+ subreq->transferred += pre;
+ start += pre;
+ len -= pre;
+ iov_iter_advance(&subreq->io_iter, pre);
+ }
+
+ /* We also need to end on the cache granularity boundary */
+ post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
+ if (post) {
+ len -= post;
+ if (len == 0) {
+ netfs_write_subrequest_terminated(subreq, post, false);
+ return;
+ }
+ iov_iter_truncate(&subreq->io_iter, len);
+ }
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
+ &start, &len, len, true);
+ cachefiles_end_secure(cache, saved_cred);
+ if (ret < 0) {
+ netfs_write_subrequest_terminated(subreq, ret, false);
+ return;
+ }
+
+ cachefiles_write(&subreq->rreq->cache_resources,
+ subreq->start, &subreq->io_iter,
+ netfs_write_subrequest_terminated, subreq);
+}
+
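
A worked example of the trimming above, assuming CACHEFILES_DIO_BLOCK_SIZE == 4096 (0x1000): a subrequest with start = 0x1100, len = 0x3000 becomes

	off  = 0x1100 & 0xfff = 0x100
	pre  = 0x1000 - 0x100 = 0xf00	(skipped; counted as transferred)
	len  = 0x3000 - 0xf00 = 0x2100
	post = 0x2100 & 0xfff = 0x100	(truncated off the iterator)

leaving an aligned 0x2000-byte write at offset 0x2000.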
/*
* Clean up an operation.
*/
@@ -638,8 +710,10 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
.end_operation = cachefiles_end_operation,
.read = cachefiles_read,
.write = cachefiles_write,
+ .issue_write = cachefiles_issue_write,
.prepare_read = cachefiles_prepare_read,
.prepare_write = cachefiles_prepare_write,
+ .prepare_write_subreq = cachefiles_prepare_write_subreq,
.prepare_ondemand_read = cachefiles_prepare_ondemand_read,
.query_occupancy = cachefiles_query_occupancy,
};
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1340d77124ae..8c16bc5250ef 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -193,7 +193,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
* block, but do not exceed the file size, unless the original
* request already exceeds it.
*/
- new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
+ new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
if (new_end > end && new_end <= rreq->start + max_len)
rreq->len = new_end - rreq->start;
@@ -498,11 +498,6 @@ const struct netfs_request_ops ceph_netfs_ops = {
};
#ifdef CONFIG_CEPH_FSCACHE
-static void ceph_set_page_fscache(struct page *page)
-{
- set_page_fscache(page);
-}
-
static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
struct inode *inode = priv;
@@ -517,13 +512,9 @@ static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, b
struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
- ceph_fscache_write_terminated, inode, caching);
+ ceph_fscache_write_terminated, inode, true, caching);
}
#else
-static inline void ceph_set_page_fscache(struct page *page)
-{
-}
-
static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
@@ -715,8 +706,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
len = wlen;
set_page_writeback(page);
- if (caching)
- ceph_set_page_fscache(page);
ceph_fscache_write_to_cache(inode, page_off, len, caching);
if (IS_ENCRYPTED(inode)) {
@@ -795,10 +784,10 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
ihold(inode);
if (wbc->sync_mode == WB_SYNC_NONE &&
- ceph_inode_to_fs_client(inode)->write_congested)
+ ceph_inode_to_fs_client(inode)->write_congested) {
+ redirty_page_for_writepage(wbc, page);
return AOP_WRITEPAGE_ACTIVATE;
-
- wait_on_page_fscache(page);
+ }
err = writepage_nounlock(page, wbc);
if (err == -ERESTARTSYS) {
@@ -1073,7 +1062,7 @@ get_more_pages:
unlock_page(page);
break;
}
- if (PageWriteback(page) || PageFsCache(page)) {
+ if (PageWriteback(page)) {
if (wbc->sync_mode == WB_SYNC_NONE) {
doutc(cl, "%p under writeback\n", page);
unlock_page(page);
@@ -1081,7 +1070,6 @@ get_more_pages:
}
doutc(cl, "waiting on writeback %p\n", page);
wait_on_page_writeback(page);
- wait_on_page_fscache(page);
}
if (!clear_page_dirty_for_io(page)) {
@@ -1266,8 +1254,6 @@ new_request:
}
set_page_writeback(page);
- if (caching)
- ceph_set_page_fscache(page);
len += thp_size(page);
}
ceph_fscache_write_to_cache(inode, offset, len, caching);
@@ -1511,7 +1497,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
if (r < 0)
return r;
- folio_wait_fscache(folio);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
WARN_ON_ONCE(!folio_test_locked(folio));
*pagep = &folio->page;
return 0;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 55051ad09c19..c4941ba245ac 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4783,13 +4783,13 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
ceph_vinop(inode));
- spin_lock(&mdsc->cap_unlink_delay_lock);
+ spin_lock(&mdsc->cap_delay_lock);
ci->i_ceph_flags |= CEPH_I_FLUSH;
if (!list_empty(&ci->i_cap_delay_list))
list_del_init(&ci->i_cap_delay_list);
list_add_tail(&ci->i_cap_delay_list,
&mdsc->cap_unlink_delay_list);
- spin_unlock(&mdsc->cap_unlink_delay_lock);
+ spin_unlock(&mdsc->cap_delay_lock);
/*
* Fire the work immediately, because the MDS maybe
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 7b2e77517f23..99561fddcb38 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -577,6 +577,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
/* Set parameters for the netfs library */
netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
+ /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+ __set_bit(NETFS_ICTX_USE_PGPRIV2, &ci->netfs.flags);
spin_lock_init(&ci->i_ceph_lock);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 3ab9c268a8bb..360b686c3c67 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2504,7 +2504,7 @@ static void ceph_cap_unlink_work(struct work_struct *work)
struct ceph_client *cl = mdsc->fsc->client;
doutc(cl, "begin\n");
- spin_lock(&mdsc->cap_unlink_delay_lock);
+ spin_lock(&mdsc->cap_delay_lock);
while (!list_empty(&mdsc->cap_unlink_delay_list)) {
struct ceph_inode_info *ci;
struct inode *inode;
@@ -2516,15 +2516,15 @@ static void ceph_cap_unlink_work(struct work_struct *work)
inode = igrab(&ci->netfs.inode);
if (inode) {
- spin_unlock(&mdsc->cap_unlink_delay_lock);
+ spin_unlock(&mdsc->cap_delay_lock);
doutc(cl, "on %p %llx.%llx\n", inode,
ceph_vinop(inode));
ceph_check_caps(ci, CHECK_CAPS_FLUSH);
iput(inode);
- spin_lock(&mdsc->cap_unlink_delay_lock);
+ spin_lock(&mdsc->cap_delay_lock);
}
}
- spin_unlock(&mdsc->cap_unlink_delay_lock);
+ spin_unlock(&mdsc->cap_delay_lock);
doutc(cl, "done\n");
}
@@ -5404,7 +5404,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_LIST_HEAD(&mdsc->cap_wait_list);
spin_lock_init(&mdsc->cap_delay_lock);
INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
- spin_lock_init(&mdsc->cap_unlink_delay_lock);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 03f8ff00874f..b88e80415224 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -461,9 +461,8 @@ struct ceph_mds_client {
struct delayed_work delayed_work; /* delayed work */
unsigned long last_renew_caps; /* last time we renewed our caps */
struct list_head cap_delay_list; /* caps with delayed release */
- spinlock_t cap_delay_lock; /* protects cap_delay_list */
struct list_head cap_unlink_delay_list; /* caps with delayed release for unlink */
- spinlock_t cap_unlink_delay_lock; /* protects cap_unlink_delay_list */
+ spinlock_t cap_delay_lock; /* protects cap_delay_list and cap_unlink_delay_list */
struct list_head snap_flush_list; /* cap_snaps ready to flush */
spinlock_t snap_flush_lock;
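
The consolidation above means any path that moves an inode between the two delay lists now takes a single spinlock, so there is no lock ordering to get wrong. A minimal sketch of that pattern, with illustrative demo_* names rather than the real ceph structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_mdsc {
	spinlock_t lock;		/* protects both lists below */
	struct list_head delay_list;
	struct list_head unlink_delay_list;
};

struct demo_ci {
	struct list_head delay_item;
};

static void demo_queue_for_unlink(struct demo_mdsc *mdsc, struct demo_ci *ci)
{
	spin_lock(&mdsc->lock);
	/* re-queue onto the unlink list; safe because one lock covers both */
	if (!list_empty(&ci->delay_item))
		list_del_init(&ci->delay_item);
	list_add_tail(&ci->delay_item, &mdsc->unlink_delay_list);
	spin_unlock(&mdsc->lock);
}
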
diff --git a/fs/coredump.c b/fs/coredump.c
index be6403b4b14b..317065e3eb9b 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -56,10 +56,15 @@
static bool dump_vma_snapshot(struct coredump_params *cprm);
static void free_vma_snapshot(struct coredump_params *cprm);
+#define CORE_FILE_NOTE_SIZE_DEFAULT (4*1024*1024)
+/* Define a reasonable max cap */
+#define CORE_FILE_NOTE_SIZE_MAX (16*1024*1024)
+
static int core_uses_pid;
static unsigned int core_pipe_limit;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
+unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
struct core_name {
char *corename;
@@ -998,6 +1003,9 @@ static int proc_dostring_coredump(struct ctl_table *table, int write,
return error;
}
+static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT;
+static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX;
+
static struct ctl_table coredump_sysctls[] = {
{
.procname = "core_uses_pid",
@@ -1020,6 +1028,15 @@ static struct ctl_table coredump_sysctls[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "core_file_note_size_limit",
+ .data = &core_file_note_size_limit,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = (unsigned int *)&core_file_note_size_min,
+ .extra2 = (unsigned int *)&core_file_note_size_max,
+ },
};
static int __init init_fs_coredump_sysctls(void)
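
As a rough illustration of how a coredump writer might consume the new tunable (the demo_* helper is hypothetical and not part of this patch; truncating rather than failing is an assumption about the consumer):

#include <linux/types.h>

extern unsigned int core_file_note_size_limit;

static size_t demo_clamp_note_size(size_t wanted)
{
	/* truncate rather than fail, so a core is still produced */
	if (wanted > core_file_note_size_limit)
		return core_file_note_size_limit;
	return wanted;
}

The limit is adjustable at runtime through /proc/sys/kernel/core_file_note_size_limit and, per the extra1/extra2 bounds in the table above, is clamped between the 4 MiB default and the 16 MiB cap.
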
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 39e75131fd5a..9901057a15ba 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -495,7 +495,7 @@ static void cramfs_kill_sb(struct super_block *sb)
sb->s_mtd = NULL;
} else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
sync_blockdev(sb->s_bdev);
- fput(sb->s_bdev_file);
+ bdev_fput(sb->s_bdev_file);
}
kfree(sbi);
}
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 104771c3d3f6..d8d5049b8fe1 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -30,21 +30,41 @@
int fscrypt_file_open(struct inode *inode, struct file *filp)
{
int err;
- struct dentry *dir;
+ struct dentry *dentry, *dentry_parent;
+ struct inode *inode_parent;
err = fscrypt_require_key(inode);
if (err)
return err;
- dir = dget_parent(file_dentry(filp));
- if (IS_ENCRYPTED(d_inode(dir)) &&
- !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+ dentry = file_dentry(filp);
+
+ /*
+ * Getting a reference to the parent dentry is needed for the actual
+ * encryption policy comparison, but it's expensive on multi-core
+ * systems. Since this function runs on unencrypted files too, start
+ * with a lightweight RCU-mode check for the parent directory being
+ * unencrypted (in which case it's fine for the child to be either
+ * unencrypted, or encrypted with any policy). Only continue on to the
+ * full policy check if the parent directory is actually encrypted.
+ */
+ rcu_read_lock();
+ dentry_parent = READ_ONCE(dentry->d_parent);
+ inode_parent = d_inode_rcu(dentry_parent);
+ if (inode_parent != NULL && !IS_ENCRYPTED(inode_parent)) {
+ rcu_read_unlock();
+ return 0;
+ }
+ rcu_read_unlock();
+
+ dentry_parent = dget_parent(dentry);
+ if (!fscrypt_has_permitted_context(d_inode(dentry_parent), inode)) {
fscrypt_warn(inode,
"Inconsistent encryption context (parent directory: %lu)",
- d_inode(dir)->i_ino);
+ d_inode(dentry_parent)->i_ino);
err = -EPERM;
}
- dput(dir);
+ dput(dentry_parent);
return err;
}
EXPORT_SYMBOL_GPL(fscrypt_file_open);
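
The hunk above is an instance of a common fast-path idiom: answer the cheap common case under RCU and only fall back to taking a reference when the expensive check is unavoidable. A minimal sketch of the idiom (the demo_* name is illustrative):

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/rcupdate.h>

static bool demo_parent_needs_check(struct dentry *dentry)
{
	struct inode *dir;
	bool need_check = true;

	rcu_read_lock();
	dir = d_inode_rcu(READ_ONCE(dentry->d_parent));
	if (dir && !IS_ENCRYPTED(dir))
		need_check = false;	/* cheap negative answer, no refcount */
	rcu_read_unlock();

	/* caller falls back to dget_parent() only when this returns true */
	return need_check;
}
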
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index b4002aea7cdb..40de69860dcf 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -284,7 +284,7 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
const struct inode **inode_ret,
u64 *lblk_num_ret)
{
- struct page *page = bh->b_page;
+ struct folio *folio = bh->b_folio;
const struct address_space *mapping;
const struct inode *inode;
@@ -292,13 +292,13 @@ static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
* The ext4 journal (jbd2) can submit a buffer_head it directly created
* for a non-pagecache page. fscrypt doesn't care about these.
*/
- mapping = page_mapping(page);
+ mapping = folio_mapping(folio);
if (!mapping)
return false;
inode = mapping->host;
*inode_ret = inode;
- *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
+ *lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) +
(bh_offset(bh) >> inode->i_blkbits);
return true;
}
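
The logical block computation carried over to folios above can be checked with small numbers (the helper below is illustrative only): with 4 KiB pages (PAGE_SHIFT == 12) and 1 KiB blocks (i_blkbits == 10), folio index 3 and bh offset 2048 give (3 << 2) + (2048 >> 10) = 14.

#include <linux/mm.h>
#include <linux/types.h>

static u64 demo_lblk(u64 folio_index, unsigned int bh_off,
		     unsigned int blkbits)
{
	return (folio_index << (PAGE_SHIFT - blkbits)) + (bh_off >> blkbits);
}
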
diff --git a/fs/dax.c b/fs/dax.c
index 423fc1607dfa..becb4a6920c6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1207,17 +1207,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = mapping->host;
pgtable_t pgtable = NULL;
- struct page *zero_page;
+ struct folio *zero_folio;
spinlock_t *ptl;
pmd_t pmd_entry;
pfn_t pfn;
- zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
+ zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
- if (unlikely(!zero_page))
+ if (unlikely(!zero_folio))
goto fallback;
- pfn = page_to_pfn_t(zero_page);
+ pfn = page_to_pfn_t(&zero_folio->page);
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
DAX_PMD | DAX_ZERO_PAGE);
@@ -1237,17 +1237,17 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
mm_inc_nr_ptes(vma->vm_mm);
}
- pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
+ pmd_entry = mk_pmd(&zero_folio->page, vmf->vma->vm_page_prot);
pmd_entry = pmd_mkhuge(pmd_entry);
set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
spin_unlock(ptl);
- trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
+ trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
return VM_FAULT_NOPAGE;
fallback:
if (pgtable)
pte_free(vma->vm_mm, pgtable);
- trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
+ trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
return VM_FAULT_FALLBACK;
}
#else
diff --git a/fs/dcache.c b/fs/dcache.c
index 71a8e943a0fa..407095188f83 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -355,7 +355,7 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
flags &= ~DCACHE_ENTRY_TYPE;
WRITE_ONCE(dentry->d_flags, flags);
dentry->d_inode = NULL;
- if (dentry->d_flags & DCACHE_LRU_LIST)
+ if (flags & DCACHE_LRU_LIST)
this_cpu_inc(nr_dentry_negative);
}
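
The one-character fix above is a snapshot pattern: test the flags value that was already read, rather than re-reading a field that may be changing underneath. An illustrative sketch:

#include <linux/dcache.h>

static bool demo_was_on_lru(struct dentry *dentry)
{
	unsigned int flags = READ_ONCE(dentry->d_flags);

	flags &= ~DCACHE_ENTRY_TYPE;
	WRITE_ONCE(dentry->d_flags, flags);
	/* decide from the snapshot, not from a racy re-read */
	return flags & DCACHE_LRU_LIST;
}
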
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index a40da0065433..dc51df0b118d 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -14,7 +14,8 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/mount.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/kobject.h>
@@ -23,7 +24,6 @@
#include <linux/fsnotify.h>
#include <linux/string.h>
#include <linux/seq_file.h>
-#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/security.h>
@@ -77,7 +77,7 @@ static struct inode *debugfs_get_inode(struct super_block *sb)
return inode;
}
-struct debugfs_mount_opts {
+struct debugfs_fs_info {
kuid_t uid;
kgid_t gid;
umode_t mode;
@@ -89,68 +89,51 @@ enum {
Opt_uid,
Opt_gid,
Opt_mode,
- Opt_err
};
-static const match_table_t tokens = {
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_mode, "mode=%o"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec debugfs_param_specs[] = {
+ fsparam_u32 ("gid", Opt_gid),
+ fsparam_u32oct ("mode", Opt_mode),
+ fsparam_u32 ("uid", Opt_uid),
+ {}
};
-struct debugfs_fs_info {
- struct debugfs_mount_opts mount_opts;
-};
-
-static int debugfs_parse_options(char *data, struct debugfs_mount_opts *opts)
+static int debugfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- substring_t args[MAX_OPT_ARGS];
- int option;
- int token;
+ struct debugfs_fs_info *opts = fc->s_fs_info;
+ struct fs_parse_result result;
kuid_t uid;
kgid_t gid;
- char *p;
-
- opts->opts = 0;
- opts->mode = DEBUGFS_DEFAULT_MODE;
-
- while ((p = strsep(&data, ",")) != NULL) {
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return -EINVAL;
- opts->uid = uid;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return -EINVAL;
- opts->gid = gid;
- break;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
- /*
- * We might like to report bad mount options here;
- * but traditionally debugfs has ignored all mount options
- */
- }
-
- opts->opts |= BIT(token);
+ int opt;
+
+ opt = fs_parse(fc, debugfs_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ return invalf(fc, "Unknown uid");
+ opts->uid = uid;
+ break;
+ case Opt_gid:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ return invalf(fc, "Unknown gid");
+ opts->gid = gid;
+ break;
+ case Opt_mode:
+ opts->mode = result.uint_32 & S_IALLUGO;
+ break;
+ /*
+ * We might like to report bad mount options here;
+ * but traditionally debugfs has ignored all mount options
+ */
}
+ opts->opts |= BIT(opt);
+
return 0;
}
@@ -158,23 +141,22 @@ static void _debugfs_apply_options(struct super_block *sb, bool remount)
{
struct debugfs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = d_inode(sb->s_root);
- struct debugfs_mount_opts *opts = &fsi->mount_opts;
/*
* On remount, only reset mode/uid/gid if they were provided as mount
* options.
*/
- if (!remount || opts->opts & BIT(Opt_mode)) {
+ if (!remount || fsi->opts & BIT(Opt_mode)) {
inode->i_mode &= ~S_IALLUGO;
- inode->i_mode |= opts->mode;
+ inode->i_mode |= fsi->mode;
}
- if (!remount || opts->opts & BIT(Opt_uid))
- inode->i_uid = opts->uid;
+ if (!remount || fsi->opts & BIT(Opt_uid))
+ inode->i_uid = fsi->uid;
- if (!remount || opts->opts & BIT(Opt_gid))
- inode->i_gid = opts->gid;
+ if (!remount || fsi->opts & BIT(Opt_gid))
+ inode->i_gid = fsi->gid;
}
static void debugfs_apply_options(struct super_block *sb)
@@ -187,35 +169,33 @@ static void debugfs_apply_options_remount(struct super_block *sb)
_debugfs_apply_options(sb, true);
}
-static int debugfs_remount(struct super_block *sb, int *flags, char *data)
+static int debugfs_reconfigure(struct fs_context *fc)
{
- int err;
- struct debugfs_fs_info *fsi = sb->s_fs_info;
+ struct super_block *sb = fc->root->d_sb;
+ struct debugfs_fs_info *sb_opts = sb->s_fs_info;
+ struct debugfs_fs_info *new_opts = fc->s_fs_info;
sync_filesystem(sb);
- err = debugfs_parse_options(data, &fsi->mount_opts);
- if (err)
- goto fail;
+ /* structure copy of new mount options to sb */
+ *sb_opts = *new_opts;
debugfs_apply_options_remount(sb);
-fail:
- return err;
+ return 0;
}
static int debugfs_show_options(struct seq_file *m, struct dentry *root)
{
struct debugfs_fs_info *fsi = root->d_sb->s_fs_info;
- struct debugfs_mount_opts *opts = &fsi->mount_opts;
- if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+ if (!uid_eq(fsi->uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
- from_kuid_munged(&init_user_ns, opts->uid));
- if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+ from_kuid_munged(&init_user_ns, fsi->uid));
+ if (!gid_eq(fsi->gid, GLOBAL_ROOT_GID))
seq_printf(m, ",gid=%u",
- from_kgid_munged(&init_user_ns, opts->gid));
- if (opts->mode != DEBUGFS_DEFAULT_MODE)
- seq_printf(m, ",mode=%o", opts->mode);
+ from_kgid_munged(&init_user_ns, fsi->gid));
+ if (fsi->mode != DEBUGFS_DEFAULT_MODE)
+ seq_printf(m, ",mode=%o", fsi->mode);
return 0;
}
@@ -229,7 +209,6 @@ static void debugfs_free_inode(struct inode *inode)
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
- .remount_fs = debugfs_remount,
.show_options = debugfs_show_options,
.free_inode = debugfs_free_inode,
};
@@ -263,26 +242,14 @@ static const struct dentry_operations debugfs_dops = {
.d_automount = debugfs_automount,
};
-static int debug_fill_super(struct super_block *sb, void *data, int silent)
+static int debugfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr debug_files[] = {{""}};
- struct debugfs_fs_info *fsi;
int err;
- fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL);
- sb->s_fs_info = fsi;
- if (!fsi) {
- err = -ENOMEM;
- goto fail;
- }
-
- err = debugfs_parse_options(data, &fsi->mount_opts);
+ err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
if (err)
- goto fail;
-
- err = simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
- if (err)
- goto fail;
+ return err;
sb->s_op = &debugfs_super_operations;
sb->s_d_op = &debugfs_dops;
@@ -290,27 +257,48 @@ static int debug_fill_super(struct super_block *sb, void *data, int silent)
debugfs_apply_options(sb);
return 0;
-
-fail:
- kfree(fsi);
- sb->s_fs_info = NULL;
- return err;
}
-static struct dentry *debug_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static int debugfs_get_tree(struct fs_context *fc)
{
if (!(debugfs_allow & DEBUGFS_ALLOW_API))
- return ERR_PTR(-EPERM);
+ return -EPERM;
+
+ return get_tree_single(fc, debugfs_fill_super);
+}
+
+static void debugfs_free_fc(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
+}
- return mount_single(fs_type, flags, data, debug_fill_super);
+static const struct fs_context_operations debugfs_context_ops = {
+ .free = debugfs_free_fc,
+ .parse_param = debugfs_parse_param,
+ .get_tree = debugfs_get_tree,
+ .reconfigure = debugfs_reconfigure,
+};
+
+static int debugfs_init_fs_context(struct fs_context *fc)
+{
+ struct debugfs_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(struct debugfs_fs_info), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ fsi->mode = DEBUGFS_DEFAULT_MODE;
+
+ fc->s_fs_info = fsi;
+ fc->ops = &debugfs_context_ops;
+ return 0;
}
static struct file_system_type debug_fs_type = {
.owner = THIS_MODULE,
.name = "debugfs",
- .mount = debug_mount,
+ .init_fs_context = debugfs_init_fs_context,
+ .parameters = debugfs_param_specs,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("debugfs");
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 62c97ff9e852..b0aafe640fa4 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1217,7 +1217,6 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
*/
inode_dio_begin(inode);
- retval = 0;
sdio.blkbits = blkbits;
sdio.blkfactor = i_blkbits - blkbits;
sdio.block_in_file = offset >> blkbits;
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 1f2f70a1b824..59711486d801 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -12,47 +12,50 @@
#include <trace/events/dlm.h>
#include "dlm_internal.h"
+#include "lvb_table.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"
-void dlm_release_callback(struct kref *ref)
+static void dlm_callback_work(struct work_struct *work)
{
- struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref);
+ struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
+
+ if (cb->flags & DLM_CB_BAST) {
+ trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
+ cb->res_length);
+ cb->bastfn(cb->astparam, cb->mode);
+ } else if (cb->flags & DLM_CB_CAST) {
+ trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
+ cb->sb_flags, cb->res_name, cb->res_length);
+ cb->lkb_lksb->sb_status = cb->sb_status;
+ cb->lkb_lksb->sb_flags = cb->sb_flags;
+ cb->astfn(cb->astparam);
+ }
dlm_free_cb(cb);
}
-void dlm_callback_set_last_ptr(struct dlm_callback **from,
- struct dlm_callback *to)
-{
- if (*from)
- kref_put(&(*from)->ref, dlm_release_callback);
-
- if (to)
- kref_get(&to->ref);
-
- *from = to;
-}
-
-int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
- int status, uint32_t sbflags)
+int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags,
+ struct dlm_callback **cb)
{
- struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+ struct dlm_rsb *rsb = lkb->lkb_resource;
int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
- struct dlm_callback *cb;
+ struct dlm_ls *ls = rsb->res_ls;
+ int copy_lvb = 0;
int prev_mode;
if (flags & DLM_CB_BAST) {
/* if cb is a bast, it should be skipped if the blocking mode is
* compatible with the last granted mode
*/
- if (lkb->lkb_last_cast) {
- if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
+ if (lkb->lkb_last_cast_cb_mode != -1) {
+ if (dlm_modes_compat(mode, lkb->lkb_last_cast_cb_mode)) {
log_debug(ls, "skip %x bast mode %d for cast mode %d",
lkb->lkb_id, mode,
- lkb->lkb_last_cast->mode);
+ lkb->lkb_last_cast_cb_mode);
goto out;
}
}
@@ -63,8 +66,9 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
* is a bast for the same mode or a more restrictive mode.
 * (the additional > PR check is needed for PR/CW inversion)
*/
- if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
- prev_mode = lkb->lkb_last_cb->mode;
+ if (lkb->lkb_last_cb_mode != -1 &&
+ lkb->lkb_last_cb_flags & DLM_CB_BAST) {
+ prev_mode = lkb->lkb_last_cb_mode;
if ((prev_mode == mode) ||
(prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
@@ -73,53 +77,55 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
goto out;
}
}
+
+ lkb->lkb_last_bast_time = ktime_get();
+ lkb->lkb_last_bast_cb_mode = mode;
+ } else if (flags & DLM_CB_CAST) {
+ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+ prev_mode = lkb->lkb_last_cast_cb_mode;
+
+ if (!status && lkb->lkb_lksb->sb_lvbptr &&
+ dlm_lvb_operations[prev_mode + 1][mode + 1])
+ copy_lvb = 1;
+ }
+
+ lkb->lkb_last_cast_cb_mode = mode;
+ lkb->lkb_last_cast_time = ktime_get();
}
- cb = dlm_allocate_cb();
- if (!cb) {
+ lkb->lkb_last_cb_mode = mode;
+ lkb->lkb_last_cb_flags = flags;
+
+ *cb = dlm_allocate_cb();
+ if (!*cb) {
rv = DLM_ENQUEUE_CALLBACK_FAILURE;
goto out;
}
- cb->flags = flags;
- cb->mode = mode;
- cb->sb_status = status;
- cb->sb_flags = (sbflags & 0x000000FF);
- kref_init(&cb->ref);
- if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
- rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
-
- list_add_tail(&cb->list, &lkb->lkb_callbacks);
+ /* for tracing */
+ (*cb)->lkb_id = lkb->lkb_id;
+ (*cb)->ls_id = ls->ls_global_id;
+ memcpy((*cb)->res_name, rsb->res_name, rsb->res_length);
+ (*cb)->res_length = rsb->res_length;
- if (flags & DLM_CB_CAST)
- dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);
+ (*cb)->flags = flags;
+ (*cb)->mode = mode;
+ (*cb)->sb_status = status;
+ (*cb)->sb_flags = (sbflags & 0x000000FF);
+ (*cb)->copy_lvb = copy_lvb;
+ (*cb)->lkb_lksb = lkb->lkb_lksb;
- dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);
+ rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
- out:
+out:
return rv;
}
-int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
-{
- /* oldest undelivered cb is callbacks first entry */
- *cb = list_first_entry_or_null(&lkb->lkb_callbacks,
- struct dlm_callback, list);
- if (!*cb)
- return DLM_DEQUEUE_CALLBACK_EMPTY;
-
- /* remove it from callbacks so shift others down */
- list_del(&(*cb)->list);
- if (list_empty(&lkb->lkb_callbacks))
- return DLM_DEQUEUE_CALLBACK_LAST;
-
- return DLM_DEQUEUE_CALLBACK_SUCCESS;
-}
-
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
- uint32_t sbflags)
+ uint32_t sbflags)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+ struct dlm_callback *cb;
int rv;
if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
@@ -127,88 +133,36 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
return;
}
- spin_lock(&lkb->lkb_cb_lock);
- rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
+ rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags,
+ &cb);
switch (rv) {
case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
- kref_get(&lkb->lkb_ref);
-
- spin_lock(&ls->ls_cb_lock);
- if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
- list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
- } else {
- queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
- }
- spin_unlock(&ls->ls_cb_lock);
- break;
- case DLM_ENQUEUE_CALLBACK_FAILURE:
- WARN_ON_ONCE(1);
+ cb->astfn = lkb->lkb_astfn;
+ cb->bastfn = lkb->lkb_bastfn;
+ cb->astparam = lkb->lkb_astparam;
+ INIT_WORK(&cb->work, dlm_callback_work);
+
+ spin_lock_bh(&ls->ls_cb_lock);
+ if (test_bit(LSFL_CB_DELAY, &ls->ls_flags))
+ list_add(&cb->list, &ls->ls_cb_delay);
+ else
+ queue_work(ls->ls_callback_wq, &cb->work);
+ spin_unlock_bh(&ls->ls_cb_lock);
break;
case DLM_ENQUEUE_CALLBACK_SUCCESS:
break;
+ case DLM_ENQUEUE_CALLBACK_FAILURE:
+ fallthrough;
default:
WARN_ON_ONCE(1);
break;
}
- spin_unlock(&lkb->lkb_cb_lock);
-}
-
-void dlm_callback_work(struct work_struct *work)
-{
- struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
- struct dlm_ls *ls = lkb->lkb_resource->res_ls;
- void (*castfn) (void *astparam);
- void (*bastfn) (void *astparam, int mode);
- struct dlm_callback *cb;
- int rv;
-
- spin_lock(&lkb->lkb_cb_lock);
- rv = dlm_dequeue_lkb_callback(lkb, &cb);
- if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY)) {
- clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
- spin_unlock(&lkb->lkb_cb_lock);
- goto out;
- }
- spin_unlock(&lkb->lkb_cb_lock);
-
- for (;;) {
- castfn = lkb->lkb_astfn;
- bastfn = lkb->lkb_bastfn;
-
- if (cb->flags & DLM_CB_BAST) {
- trace_dlm_bast(ls, lkb, cb->mode);
- lkb->lkb_last_bast_time = ktime_get();
- lkb->lkb_last_bast_mode = cb->mode;
- bastfn(lkb->lkb_astparam, cb->mode);
- } else if (cb->flags & DLM_CB_CAST) {
- lkb->lkb_lksb->sb_status = cb->sb_status;
- lkb->lkb_lksb->sb_flags = cb->sb_flags;
- trace_dlm_ast(ls, lkb);
- lkb->lkb_last_cast_time = ktime_get();
- castfn(lkb->lkb_astparam);
- }
-
- kref_put(&cb->ref, dlm_release_callback);
-
- spin_lock(&lkb->lkb_cb_lock);
- rv = dlm_dequeue_lkb_callback(lkb, &cb);
- if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
- clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
- spin_unlock(&lkb->lkb_cb_lock);
- break;
- }
- spin_unlock(&lkb->lkb_cb_lock);
- }
-
-out:
- /* undo kref_get from dlm_add_callback, may cause lkb to be freed */
- dlm_put_lkb(lkb);
}
int dlm_callback_start(struct dlm_ls *ls)
{
- ls->ls_callback_wq = alloc_workqueue("dlm_callback",
- WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+ ls->ls_callback_wq = alloc_ordered_workqueue("dlm_callback",
+ WQ_HIGHPRI | WQ_MEM_RECLAIM);
if (!ls->ls_callback_wq) {
log_print("can't start dlm_callback workqueue");
return -ENOMEM;
@@ -225,9 +179,9 @@ void dlm_callback_stop(struct dlm_ls *ls)
void dlm_callback_suspend(struct dlm_ls *ls)
{
if (ls->ls_callback_wq) {
- spin_lock(&ls->ls_cb_lock);
+ spin_lock_bh(&ls->ls_cb_lock);
set_bit(LSFL_CB_DELAY, &ls->ls_flags);
- spin_unlock(&ls->ls_cb_lock);
+ spin_unlock_bh(&ls->ls_cb_lock);
flush_workqueue(ls->ls_callback_wq);
}
@@ -237,7 +191,7 @@ void dlm_callback_suspend(struct dlm_ls *ls)
void dlm_callback_resume(struct dlm_ls *ls)
{
- struct dlm_lkb *lkb, *safe;
+ struct dlm_callback *cb, *safe;
int count = 0, sum = 0;
bool empty;
@@ -245,10 +199,10 @@ void dlm_callback_resume(struct dlm_ls *ls)
return;
more:
- spin_lock(&ls->ls_cb_lock);
- list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
- list_del_init(&lkb->lkb_cb_list);
- queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+ spin_lock_bh(&ls->ls_cb_lock);
+ list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
+ list_del(&cb->list);
+ queue_work(ls->ls_callback_wq, &cb->work);
count++;
if (count == MAX_CB_QUEUE)
break;
@@ -256,7 +210,7 @@ more:
empty = list_empty(&ls->ls_cb_delay);
if (empty)
clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
- spin_unlock(&ls->ls_cb_lock);
+ spin_unlock_bh(&ls->ls_cb_lock);
sum += count;
if (!empty) {
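
The rework above replaces per-lkb callback queues with one work item per callback on an ordered workqueue, which preserves delivery order by construction. A stripped-down sketch of that pattern (demo_* types are illustrative, not the dlm ones):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_cb {
	struct work_struct work;
	int mode;
	void (*fn)(int mode);
};

static void demo_cb_work(struct work_struct *work)
{
	struct demo_cb *cb = container_of(work, struct demo_cb, work);

	cb->fn(cb->mode);
	kfree(cb);
}

static int demo_queue_cb(struct workqueue_struct *wq,
			 void (*fn)(int), int mode)
{
	struct demo_cb *cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		return -ENOMEM;
	cb->fn = fn;
	cb->mode = mode;
	INIT_WORK(&cb->work, demo_cb_work);
	queue_work(wq, &cb->work);	/* wq from alloc_ordered_workqueue() */
	return 0;
}
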
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
index ce007892dc2d..9093ff043bee 100644
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -14,19 +14,12 @@
#define DLM_ENQUEUE_CALLBACK_NEED_SCHED 1
#define DLM_ENQUEUE_CALLBACK_SUCCESS 0
#define DLM_ENQUEUE_CALLBACK_FAILURE -1
-int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
- int status, uint32_t sbflags);
-#define DLM_DEQUEUE_CALLBACK_EMPTY 2
-#define DLM_DEQUEUE_CALLBACK_LAST 1
-#define DLM_DEQUEUE_CALLBACK_SUCCESS 0
-int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb);
+int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+ int status, uint32_t sbflags,
+ struct dlm_callback **cb);
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
uint32_t sbflags);
-void dlm_callback_set_last_ptr(struct dlm_callback **from,
- struct dlm_callback *to);
-void dlm_release_callback(struct kref *ref);
-void dlm_callback_work(struct work_struct *work);
int dlm_callback_start(struct dlm_ls *ls);
void dlm_callback_stop(struct dlm_ls *ls);
void dlm_callback_suspend(struct dlm_ls *ls);
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index e55e0a2cd2e8..517fa975dc5a 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -63,6 +63,14 @@ static void release_node(struct config_item *);
static struct configfs_attribute *comm_attrs[];
static struct configfs_attribute *node_attrs[];
+const struct rhashtable_params dlm_rhash_rsb_params = {
+ .nelem_hint = 3, /* start small */
+ .key_len = DLM_RESNAME_MAXLEN,
+ .key_offset = offsetof(struct dlm_rsb, res_name),
+ .head_offset = offsetof(struct dlm_rsb, res_node),
+ .automatic_shrinking = true,
+};
+
struct dlm_cluster {
struct config_group group;
unsigned int cl_tcp_port;
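
The parameter block above is all rhashtable needs to hash fixed-length resource names. A minimal sketch of how such a table is used (demo_* names illustrative; error handling trimmed):

#include <linux/rhashtable.h>

struct demo_rsb {
	char name[16];			/* fixed-length key, as key_len above */
	struct rhash_head node;		/* what head_offset points at */
};

static const struct rhashtable_params demo_params = {
	.key_len	= sizeof_field(struct demo_rsb, name),
	.key_offset	= offsetof(struct demo_rsb, name),
	.head_offset	= offsetof(struct demo_rsb, node),
	.automatic_shrinking = true,
};

/* after rhashtable_init(&ht, &demo_params): */
static struct demo_rsb *demo_lookup(struct rhashtable *ht, const void *key)
{
	/* key must be key_len bytes, padded like the stored names */
	return rhashtable_lookup_fast(ht, key, demo_params);
}

Insertion is the mirror image, rhashtable_insert_fast(ht, &r->node, demo_params), with the table resizing and shrinking itself as entries come and go.
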
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index 4c91fcca0fd4..ed237d910208 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -21,6 +21,8 @@ struct dlm_config_node {
uint32_t comm_seq;
};
+extern const struct rhashtable_params dlm_rhash_rsb_params;
+
#define DLM_MAX_ADDR_COUNT 3
#define DLM_PROTO_TCP 0
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 4fa11d9ddbb6..6ab3ed4074c6 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -247,7 +247,7 @@ static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb,
lkb->lkb_status,
lkb->lkb_grmode,
lkb->lkb_rqmode,
- lkb->lkb_last_bast_mode,
+ lkb->lkb_last_bast_cb_mode,
rsb_lookup,
lkb->lkb_wait_type,
lkb->lkb_lvbseq,
@@ -366,58 +366,10 @@ static void print_format4(struct dlm_rsb *r, struct seq_file *s)
unlock_rsb(r);
}
-static void print_format5_lock(struct seq_file *s, struct dlm_lkb *lkb)
-{
- struct dlm_callback *cb;
-
- /* lkb_id lkb_flags mode flags sb_status sb_flags */
-
- spin_lock(&lkb->lkb_cb_lock);
- list_for_each_entry(cb, &lkb->lkb_callbacks, list) {
- seq_printf(s, "%x %x %d %x %d %x\n",
- lkb->lkb_id,
- dlm_iflags_val(lkb),
- cb->mode,
- cb->flags,
- cb->sb_status,
- cb->sb_flags);
- }
- spin_unlock(&lkb->lkb_cb_lock);
-}
-
-static void print_format5(struct dlm_rsb *r, struct seq_file *s)
-{
- struct dlm_lkb *lkb;
-
- lock_rsb(r);
-
- list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
- print_format5_lock(s, lkb);
- if (seq_has_overflowed(s))
- goto out;
- }
-
- list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
- print_format5_lock(s, lkb);
- if (seq_has_overflowed(s))
- goto out;
- }
-
- list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) {
- print_format5_lock(s, lkb);
- if (seq_has_overflowed(s))
- goto out;
- }
- out:
- unlock_rsb(r);
-}
-
-struct rsbtbl_iter {
- struct dlm_rsb *rsb;
- unsigned bucket;
- int format;
- int header;
-};
+static const struct seq_operations format1_seq_ops;
+static const struct seq_operations format2_seq_ops;
+static const struct seq_operations format3_seq_ops;
+static const struct seq_operations format4_seq_ops;
/*
* If the buffer is full, seq_printf can be called again, but it
@@ -428,207 +380,61 @@ struct rsbtbl_iter {
static int table_seq_show(struct seq_file *seq, void *iter_ptr)
{
- struct rsbtbl_iter *ri = iter_ptr;
-
- switch (ri->format) {
- case 1:
- print_format1(ri->rsb, seq);
- break;
- case 2:
- if (ri->header) {
- seq_puts(seq, "id nodeid remid pid xid exflags flags sts grmode rqmode time_ms r_nodeid r_len r_name\n");
- ri->header = 0;
- }
- print_format2(ri->rsb, seq);
- break;
- case 3:
- if (ri->header) {
- seq_puts(seq, "rsb ptr nodeid first_lkid flags !root_list_empty !recover_list_empty recover_locks_count len\n");
- ri->header = 0;
- }
- print_format3(ri->rsb, seq);
- break;
- case 4:
- if (ri->header) {
- seq_puts(seq, "rsb ptr nodeid master_nodeid dir_nodeid our_nodeid toss_time flags len str|hex name\n");
- ri->header = 0;
- }
- print_format4(ri->rsb, seq);
- break;
- case 5:
- if (ri->header) {
- seq_puts(seq, "lkb_id lkb_flags mode flags sb_status sb_flags\n");
- ri->header = 0;
- }
- print_format5(ri->rsb, seq);
- break;
- }
+ struct dlm_rsb *rsb = list_entry(iter_ptr, struct dlm_rsb, res_rsbs_list);
+
+ if (seq->op == &format1_seq_ops)
+ print_format1(rsb, seq);
+ else if (seq->op == &format2_seq_ops)
+ print_format2(rsb, seq);
+ else if (seq->op == &format3_seq_ops)
+ print_format3(rsb, seq);
+ else if (seq->op == &format4_seq_ops)
+ print_format4(rsb, seq);
return 0;
}
-static const struct seq_operations format1_seq_ops;
-static const struct seq_operations format2_seq_ops;
-static const struct seq_operations format3_seq_ops;
-static const struct seq_operations format4_seq_ops;
-static const struct seq_operations format5_seq_ops;
-
static void *table_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct rb_root *tree;
- struct rb_node *node;
struct dlm_ls *ls = seq->private;
- struct rsbtbl_iter *ri;
- struct dlm_rsb *r;
- loff_t n = *pos;
- unsigned bucket, entry;
- int toss = (seq->op == &format4_seq_ops);
-
- bucket = n >> 32;
- entry = n & ((1LL << 32) - 1);
-
- if (bucket >= ls->ls_rsbtbl_size)
- return NULL;
-
- ri = kzalloc(sizeof(*ri), GFP_NOFS);
- if (!ri)
- return NULL;
- if (n == 0)
- ri->header = 1;
- if (seq->op == &format1_seq_ops)
- ri->format = 1;
- if (seq->op == &format2_seq_ops)
- ri->format = 2;
- if (seq->op == &format3_seq_ops)
- ri->format = 3;
- if (seq->op == &format4_seq_ops)
- ri->format = 4;
- if (seq->op == &format5_seq_ops)
- ri->format = 5;
-
- tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(tree)) {
- for (node = rb_first(tree); node; node = rb_next(node)) {
- r = rb_entry(node, struct dlm_rsb, res_hashnode);
- if (!entry--) {
- dlm_hold_rsb(r);
- ri->rsb = r;
- ri->bucket = bucket;
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- return ri;
- }
- }
- }
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
-
- /*
- * move to the first rsb in the next non-empty bucket
- */
+ struct list_head *list;
- /* zero the entry */
- n &= ~((1LL << 32) - 1);
+ if (!*pos) {
+ if (seq->op == &format2_seq_ops)
+ seq_puts(seq, "id nodeid remid pid xid exflags flags sts grmode rqmode time_ms r_nodeid r_len r_name\n");
+ else if (seq->op == &format3_seq_ops)
+ seq_puts(seq, "rsb ptr nodeid first_lkid flags !root_list_empty !recover_list_empty recover_locks_count len\n");
+ else if (seq->op == &format4_seq_ops)
+ seq_puts(seq, "rsb ptr nodeid master_nodeid dir_nodeid our_nodeid toss_time flags len str|hex name\n");
+ }
- while (1) {
- bucket++;
- n += 1LL << 32;
+ if (seq->op == &format4_seq_ops)
+ list = &ls->ls_toss;
+ else
+ list = &ls->ls_keep;
- if (bucket >= ls->ls_rsbtbl_size) {
- kfree(ri);
- return NULL;
- }
- tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(tree)) {
- node = rb_first(tree);
- r = rb_entry(node, struct dlm_rsb, res_hashnode);
- dlm_hold_rsb(r);
- ri->rsb = r;
- ri->bucket = bucket;
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- *pos = n;
- return ri;
- }
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- }
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ return seq_list_start(list, *pos);
}
static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
{
struct dlm_ls *ls = seq->private;
- struct rsbtbl_iter *ri = iter_ptr;
- struct rb_root *tree;
- struct rb_node *next;
- struct dlm_rsb *r, *rp;
- loff_t n = *pos;
- unsigned bucket;
- int toss = (seq->op == &format4_seq_ops);
-
- bucket = n >> 32;
-
- /*
- * move to the next rsb in the same bucket
- */
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- rp = ri->rsb;
- next = rb_next(&rp->res_hashnode);
-
- if (next) {
- r = rb_entry(next, struct dlm_rsb, res_hashnode);
- dlm_hold_rsb(r);
- ri->rsb = r;
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- dlm_put_rsb(rp);
- ++*pos;
- return ri;
- }
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- dlm_put_rsb(rp);
+ struct list_head *list;
- /*
- * move to the first rsb in the next non-empty bucket
- */
-
- /* zero the entry */
- n &= ~((1LL << 32) - 1);
-
- while (1) {
- bucket++;
- n += 1LL << 32;
+ if (seq->op == &format4_seq_ops)
+ list = &ls->ls_toss;
+ else
+ list = &ls->ls_keep;
- if (bucket >= ls->ls_rsbtbl_size) {
- kfree(ri);
- ++*pos;
- return NULL;
- }
- tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep;
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- if (!RB_EMPTY_ROOT(tree)) {
- next = rb_first(tree);
- r = rb_entry(next, struct dlm_rsb, res_hashnode);
- dlm_hold_rsb(r);
- ri->rsb = r;
- ri->bucket = bucket;
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- *pos = n;
- return ri;
- }
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- }
+ return seq_list_next(iter_ptr, list, pos);
}
static void table_seq_stop(struct seq_file *seq, void *iter_ptr)
{
- struct rsbtbl_iter *ri = iter_ptr;
+ struct dlm_ls *ls = seq->private;
- if (ri) {
- dlm_put_rsb(ri->rsb);
- kfree(ri);
- }
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
}
static const struct seq_operations format1_seq_ops = {
@@ -659,18 +465,10 @@ static const struct seq_operations format4_seq_ops = {
.show = table_seq_show,
};
-static const struct seq_operations format5_seq_ops = {
- .start = table_seq_start,
- .next = table_seq_next,
- .stop = table_seq_stop,
- .show = table_seq_show,
-};
-
static const struct file_operations format1_fops;
static const struct file_operations format2_fops;
static const struct file_operations format3_fops;
static const struct file_operations format4_fops;
-static const struct file_operations format5_fops;
static int table_open1(struct inode *inode, struct file *file)
{
@@ -757,20 +555,6 @@ static int table_open4(struct inode *inode, struct file *file)
return 0;
}
-static int table_open5(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int ret;
-
- ret = seq_open(file, &format5_seq_ops);
- if (ret)
- return ret;
-
- seq = file->private_data;
- seq->private = inode->i_private; /* the dlm_ls */
- return 0;
-}
-
static const struct file_operations format1_fops = {
.owner = THIS_MODULE,
.open = table_open1,
@@ -804,14 +588,6 @@ static const struct file_operations format4_fops = {
.release = seq_release
};
-static const struct file_operations format5_fops = {
- .owner = THIS_MODULE,
- .open = table_open5,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release
-};
-
/*
* dump lkb's on the ls_waiters list
*/
@@ -823,7 +599,13 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
size_t len = DLM_DEBUG_BUF_LEN, pos = 0, ret, rv;
mutex_lock(&debug_buf_lock);
- mutex_lock(&ls->ls_waiters_mutex);
+ ret = dlm_lock_recovery_try(ls);
+ if (!ret) {
+ rv = -EAGAIN;
+ goto out;
+ }
+
+ spin_lock_bh(&ls->ls_waiters_lock);
memset(debug_buf, 0, sizeof(debug_buf));
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
@@ -834,9 +616,11 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
break;
pos += ret;
}
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
+ dlm_unlock_recovery(ls);
rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
+out:
mutex_unlock(&debug_buf_lock);
return rv;
}
@@ -858,7 +642,12 @@ static ssize_t waiters_write(struct file *file, const char __user *user_buf,
if (n != 3)
return -EINVAL;
+ error = dlm_lock_recovery_try(ls);
+ if (!error)
+ return -EAGAIN;
+
error = dlm_debug_add_lkb_to_waiters(ls, lkb_id, mstype, to_nodeid);
+ dlm_unlock_recovery(ls);
if (error)
return error;
@@ -1021,16 +810,6 @@ void dlm_create_debug_file(struct dlm_ls *ls)
dlm_root,
ls,
&waiters_fops);
-
- /* format 5 */
-
- snprintf(name, sizeof(name), "%s_queued_asts", ls->ls_name);
-
- ls->ls_debug_queued_asts_dentry = debugfs_create_file(name,
- 0644,
- dlm_root,
- ls,
- &format5_fops);
}
void __init dlm_register_debugfs(void)
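
The dump paths above now ride on the stock seq_file list helpers instead of a hand-rolled bucket iterator. The minimal start/next/stop trio over a reader-locked list looks like this (demo_* names illustrative):

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);
static LIST_HEAD(demo_list);

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(&demo_lock)
{
	read_lock_bh(&demo_lock);
	return seq_list_start(&demo_list, *pos);
}

static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &demo_list, pos);
}

static void demo_seq_stop(struct seq_file *seq, void *v)
	__releases(&demo_lock)
{
	read_unlock_bh(&demo_lock);
}
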
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index f6acba4310a7..b1ab0adbd9d0 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -47,15 +47,13 @@ int dlm_dir_nodeid(struct dlm_rsb *r)
return r->res_dir_nodeid;
}
-void dlm_recover_dir_nodeid(struct dlm_ls *ls)
+void dlm_recover_dir_nodeid(struct dlm_ls *ls, const struct list_head *root_list)
{
struct dlm_rsb *r;
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, root_list, res_root_list) {
r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash);
}
- up_read(&ls->ls_root_sem);
}
int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq)
@@ -200,35 +198,98 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name,
int len)
{
struct dlm_rsb *r;
- uint32_t hash, bucket;
int rv;
- hash = jhash(name, len, 0);
- bucket = hash & (ls->ls_rsbtbl_size - 1);
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r);
- if (rv)
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss,
- name, len, &r);
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
-
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
if (!rv)
return r;
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, &ls->ls_masters_list, res_masters_list) {
if (len == r->res_length && !memcmp(name, r->res_name, len)) {
- up_read(&ls->ls_root_sem);
log_debug(ls, "find_rsb_root revert to root_list %s",
r->res_name);
return r;
}
}
- up_read(&ls->ls_root_sem);
return NULL;
}
+struct dlm_dir_dump {
+ /* init values to match if whole
+ * dump fits to one seq. Sanity check only.
+ */
+ uint64_t seq_init;
+ uint64_t nodeid_init;
+ /* compare local pointer with last lookup,
+ * just a sanity check.
+ */
+ struct list_head *last;
+
+ unsigned int sent_res; /* for log info */
+ unsigned int sent_msg; /* for log info */
+
+ struct list_head list;
+};
+
+static void drop_dir_ctx(struct dlm_ls *ls, int nodeid)
+{
+ struct dlm_dir_dump *dd, *safe;
+
+ write_lock_bh(&ls->ls_dir_dump_lock);
+ list_for_each_entry_safe(dd, safe, &ls->ls_dir_dump_list, list) {
+ if (dd->nodeid_init == nodeid) {
+ log_error(ls, "drop dump seq %llu",
+ (unsigned long long)dd->seq_init);
+ list_del(&dd->list);
+ kfree(dd);
+ }
+ }
+ write_unlock_bh(&ls->ls_dir_dump_lock);
+}
+
+static struct dlm_dir_dump *lookup_dir_dump(struct dlm_ls *ls, int nodeid)
+{
+ struct dlm_dir_dump *iter, *dd = NULL;
+
+ read_lock_bh(&ls->ls_dir_dump_lock);
+ list_for_each_entry(iter, &ls->ls_dir_dump_list, list) {
+ if (iter->nodeid_init == nodeid) {
+ dd = iter;
+ break;
+ }
+ }
+ read_unlock_bh(&ls->ls_dir_dump_lock);
+
+ return dd;
+}
+
+static struct dlm_dir_dump *init_dir_dump(struct dlm_ls *ls, int nodeid)
+{
+ struct dlm_dir_dump *dd;
+
+ dd = lookup_dir_dump(ls, nodeid);
+ if (dd) {
+ log_error(ls, "found ongoing dir dump for node %d, will drop it",
+ nodeid);
+ drop_dir_ctx(ls, nodeid);
+ }
+
+ dd = kzalloc(sizeof(*dd), GFP_ATOMIC);
+ if (!dd)
+ return NULL;
+
+ dd->seq_init = ls->ls_recover_seq;
+ dd->nodeid_init = nodeid;
+
+ write_lock_bh(&ls->ls_dir_dump_lock);
+ list_add(&dd->list, &ls->ls_dir_dump_list);
+ write_unlock_bh(&ls->ls_dir_dump_lock);
+
+ return dd;
+}
+
/* Find the rsb where we left off (or start again), then send rsb names
for rsb's we're master of and whose directory node matches the requesting
node. inbuf is the rsb name last sent, inlen is the name's length */
@@ -239,27 +300,50 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
struct list_head *list;
struct dlm_rsb *r;
int offset = 0, dir_nodeid;
+ struct dlm_dir_dump *dd;
__be16 be_namelen;
- down_read(&ls->ls_root_sem);
+ read_lock_bh(&ls->ls_masters_lock);
if (inlen > 1) {
+ dd = lookup_dir_dump(ls, nodeid);
+ if (!dd) {
+ log_error(ls, "failed to lookup dir dump context nodeid: %d",
+ nodeid);
+ goto out;
+ }
+
+ /* next chunk in dump */
r = find_rsb_root(ls, inbuf, inlen);
if (!r) {
log_error(ls, "copy_master_names from %d start %d %.*s",
nodeid, inlen, inlen, inbuf);
goto out;
}
- list = r->res_root_list.next;
+ list = r->res_masters_list.next;
+
+ /* sanity checks */
+ if (dd->last != &r->res_masters_list ||
+ dd->seq_init != ls->ls_recover_seq) {
+ log_error(ls, "failed dir dump sanity check seq_init: %llu seq: %llu",
+ (unsigned long long)dd->seq_init,
+ (unsigned long long)ls->ls_recover_seq);
+ goto out;
+ }
} else {
- list = ls->ls_root_list.next;
- }
+ dd = init_dir_dump(ls, nodeid);
+ if (!dd) {
+ log_error(ls, "failed to allocate dir dump context");
+ goto out;
+ }
- for (offset = 0; list != &ls->ls_root_list; list = list->next) {
- r = list_entry(list, struct dlm_rsb, res_root_list);
- if (r->res_nodeid)
- continue;
+ /* start dump */
+ list = ls->ls_masters_list.next;
+ dd->last = list;
+ }
+ for (offset = 0; list != &ls->ls_masters_list; list = list->next) {
+ r = list_entry(list, struct dlm_rsb, res_masters_list);
dir_nodeid = dlm_dir_nodeid(r);
if (dir_nodeid != nodeid)
continue;
@@ -277,7 +361,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
be_namelen = cpu_to_be16(0);
memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
offset += sizeof(__be16);
- ls->ls_recover_dir_sent_msg++;
+ dd->sent_msg++;
goto out;
}
@@ -286,7 +370,8 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
offset += sizeof(__be16);
memcpy(outbuf + offset, r->res_name, r->res_length);
offset += r->res_length;
- ls->ls_recover_dir_sent_res++;
+ dd->sent_res++;
+ dd->last = list;
}
/*
@@ -294,14 +379,22 @@ void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
* terminating record.
*/
- if ((list == &ls->ls_root_list) &&
+ if ((list == &ls->ls_masters_list) &&
(offset + sizeof(uint16_t) <= outlen)) {
+ /* end dump */
be_namelen = cpu_to_be16(0xFFFF);
memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
offset += sizeof(__be16);
- ls->ls_recover_dir_sent_msg++;
+ dd->sent_msg++;
+ log_rinfo(ls, "dlm_recover_directory nodeid %d sent %u res out %u messages",
+ nodeid, dd->sent_res, dd->sent_msg);
+
+ write_lock_bh(&ls->ls_dir_dump_lock);
+ list_del_init(&dd->list);
+ write_unlock_bh(&ls->ls_dir_dump_lock);
+ kfree(dd);
}
out:
- up_read(&ls->ls_root_sem);
+ read_unlock_bh(&ls->ls_masters_lock);
}
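
The dump context introduced above makes directory dumps resumable across messages: one context per requesting node, looked up by nodeid on each chunk and validated against the recovery sequence. A stripped-down sketch of the lookup side (illustrative names, locking omitted):

#include <linux/list.h>

struct demo_dump {
	int nodeid;			/* which peer this dump serves */
	struct list_head *last;		/* resume point, sanity-checked */
	struct list_head list;
};

static struct demo_dump *demo_lookup_dump(struct list_head *dumps, int nodeid)
{
	struct demo_dump *dd;

	list_for_each_entry(dd, dumps, list)
		if (dd->nodeid == nodeid)
			return dd;
	return NULL;
}
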
diff --git a/fs/dlm/dir.h b/fs/dlm/dir.h
index 39ecb69d7ef3..5b2a7ee3762d 100644
--- a/fs/dlm/dir.h
+++ b/fs/dlm/dir.h
@@ -14,7 +14,8 @@
int dlm_dir_nodeid(struct dlm_rsb *rsb);
int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash);
-void dlm_recover_dir_nodeid(struct dlm_ls *ls);
+void dlm_recover_dir_nodeid(struct dlm_ls *ls,
+ const struct list_head *root_list);
int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq);
void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen,
char *outbuf, int outlen, int nodeid);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 3b4dbce849f0..9085ba3b2f20 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -16,6 +16,7 @@
* This is the main header file to be included in each DLM source file.
*/
+#include <uapi/linux/dlm_device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
@@ -33,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/miscdevice.h>
+#include <linux/rhashtable.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/ratelimit.h>
@@ -98,17 +100,6 @@ do { \
} \
}
-
-#define DLM_RTF_SHRINK_BIT 0
-
-struct dlm_rsbtable {
- struct rb_root keep;
- struct rb_root toss;
- spinlock_t lock;
- unsigned long flags;
-};
-
-
/*
* Lockspace member (per node in a ls)
*/
@@ -204,8 +195,7 @@ struct dlm_args {
#define DLM_IFL_OVERLAP_CANCEL_BIT 20
#define DLM_IFL_ENDOFLIFE_BIT 21
#define DLM_IFL_DEADLOCK_CANCEL_BIT 24
-#define DLM_IFL_CB_PENDING_BIT 25
-#define __DLM_IFL_MAX_BIT DLM_IFL_CB_PENDING_BIT
+#define __DLM_IFL_MAX_BIT DLM_IFL_DEADLOCK_CANCEL_BIT
/* lkb_dflags */
@@ -217,14 +207,47 @@ struct dlm_args {
#define DLM_CB_CAST 0x00000001
#define DLM_CB_BAST 0x00000002
+/* much of this is just saving user space pointers associated with the
+ * lock that we pass back to the user lib with an ast
+ */
+
+struct dlm_user_args {
+ struct dlm_user_proc *proc; /* each process that opens the lockspace
+ * device has private data
+ * (dlm_user_proc) on the struct file,
+ * the process's locks point back to it
+ */
+ struct dlm_lksb lksb;
+ struct dlm_lksb __user *user_lksb;
+ void __user *castparam;
+ void __user *castaddr;
+ void __user *bastparam;
+ void __user *bastaddr;
+ uint64_t xid;
+};
+
struct dlm_callback {
uint32_t flags; /* DLM_CBF_ */
int sb_status; /* copy to lksb status */
uint8_t sb_flags; /* copy to lksb flags */
int8_t mode; /* rq mode of bast, gr mode of cast */
+ bool copy_lvb;
+ struct dlm_lksb *lkb_lksb;
+ unsigned char lvbptr[DLM_USER_LVB_LEN];
+
+ union {
+ void *astparam; /* caller's ast arg */
+ struct dlm_user_args ua;
+ };
+ struct work_struct work;
+ void (*bastfn)(void *astparam, int mode);
+ void (*astfn)(void *astparam);
+ char res_name[DLM_RESNAME_MAXLEN];
+ size_t res_length;
+ uint32_t ls_id;
+ uint32_t lkb_id;
struct list_head list;
- struct kref ref;
};
struct dlm_lkb {
@@ -255,13 +278,10 @@ struct dlm_lkb {
struct list_head lkb_ownqueue; /* list of locks for a process */
ktime_t lkb_timestamp;
- spinlock_t lkb_cb_lock;
- struct work_struct lkb_cb_work;
- struct list_head lkb_cb_list; /* for ls_cb_delay or proc->asts */
- struct list_head lkb_callbacks;
- struct dlm_callback *lkb_last_cast;
- struct dlm_callback *lkb_last_cb;
- int lkb_last_bast_mode;
+ int8_t lkb_last_cast_cb_mode;
+ int8_t lkb_last_bast_cb_mode;
+ int8_t lkb_last_cb_mode;
+ uint8_t lkb_last_cb_flags;
ktime_t lkb_last_cast_time; /* for debugging */
ktime_t lkb_last_bast_time; /* for debugging */
@@ -290,7 +310,7 @@ struct dlm_lkb {
struct dlm_rsb {
struct dlm_ls *res_ls; /* the lockspace */
struct kref res_ref;
- struct mutex res_mutex;
+ spinlock_t res_lock;
unsigned long res_flags;
int res_length; /* length of rsb name */
int res_nodeid;
@@ -299,20 +319,22 @@ struct dlm_rsb {
int res_id; /* for ls_recover_idr */
uint32_t res_lvbseq;
uint32_t res_hash;
- uint32_t res_bucket; /* rsbtbl */
unsigned long res_toss_time;
uint32_t res_first_lkid;
struct list_head res_lookup; /* lkbs waiting on first */
union {
struct list_head res_hashchain;
- struct rb_node res_hashnode; /* rsbtbl */
+ struct rhash_head res_node; /* rsbtbl */
};
struct list_head res_grantqueue;
struct list_head res_convertqueue;
struct list_head res_waitqueue;
+ struct list_head res_rsbs_list;
struct list_head res_root_list; /* used for recovery */
+ struct list_head res_masters_list; /* used for recovery */
struct list_head res_recover_list; /* used for recovery */
+ struct list_head res_toss_q_list;
int res_recover_locks_count;
char *res_lvbptr;
@@ -346,6 +368,7 @@ enum rsb_flags {
RSB_RECOVER_CONVERT,
RSB_RECOVER_GRANT,
RSB_RECOVER_LVB_INVAL,
+ RSB_TOSS,
};
static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
@@ -559,24 +582,33 @@ struct dlm_ls {
struct kobject ls_kobj;
struct idr ls_lkbidr;
- spinlock_t ls_lkbidr_spin;
+ rwlock_t ls_lkbidr_lock;
+
+ struct rhashtable ls_rsbtbl;
+ rwlock_t ls_rsbtbl_lock;
- struct dlm_rsbtable *ls_rsbtbl;
- uint32_t ls_rsbtbl_size;
+ struct list_head ls_toss;
+ struct list_head ls_keep;
- struct mutex ls_waiters_mutex;
+ struct timer_list ls_timer;
+ /* this queue is ordered by ascending absolute
+ * res_toss_time jiffies; mod_timer() is armed with
+ * the first element when necessary.
+ */
+ struct list_head ls_toss_q;
+ spinlock_t ls_toss_q_lock;
+
+ spinlock_t ls_waiters_lock;
struct list_head ls_waiters; /* lkbs needing a reply */
- struct mutex ls_orphans_mutex;
+ spinlock_t ls_orphans_lock;
struct list_head ls_orphans;
spinlock_t ls_new_rsb_spin;
int ls_new_rsb_count;
struct list_head ls_new_rsb; /* new rsb structs */
- char *ls_remove_names[DLM_REMOVE_NAMES_MAX];
- int ls_remove_lens[DLM_REMOVE_NAMES_MAX];
-
struct list_head ls_nodes; /* current nodes in ls */
struct list_head ls_nodes_gone; /* dead node list, recovery */
int ls_num_nodes; /* number of nodes in ls */
@@ -613,7 +645,6 @@ struct dlm_ls {
spinlock_t ls_cb_lock;
struct list_head ls_cb_delay; /* save for queue_work later */
- struct timer_list ls_timer;
struct task_struct *ls_recoverd_task;
struct mutex ls_recoverd_active;
spinlock_t ls_recover_lock;
@@ -622,15 +653,11 @@ struct dlm_ls {
uint64_t ls_recover_seq;
struct dlm_recover *ls_recover_args;
struct rw_semaphore ls_in_recovery; /* block local requests */
- struct rw_semaphore ls_recv_active; /* block dlm_recv */
+ rwlock_t ls_recv_active; /* block dlm_recv */
struct list_head ls_requestqueue;/* queue remote requests */
- atomic_t ls_requestqueue_cnt;
- wait_queue_head_t ls_requestqueue_wait;
- struct mutex ls_requestqueue_mutex;
+ rwlock_t ls_requestqueue_lock;
struct dlm_rcom *ls_recover_buf;
int ls_recover_nodeid; /* for debugging */
- unsigned int ls_recover_dir_sent_res; /* for log info */
- unsigned int ls_recover_dir_sent_msg; /* for log info */
unsigned int ls_recover_locks_in; /* for log info */
uint64_t ls_rcom_seq;
spinlock_t ls_rcom_spin;
@@ -643,8 +670,10 @@ struct dlm_ls {
wait_queue_head_t ls_recover_lock_wait;
spinlock_t ls_clear_proc_locks;
- struct list_head ls_root_list; /* root resources */
- struct rw_semaphore ls_root_sem; /* protect root_list */
+ struct list_head ls_masters_list; /* master resources */
+ rwlock_t ls_masters_lock; /* protects ls_masters_list */
+ struct list_head ls_dir_dump_list; /* dir dump contexts */
+ rwlock_t ls_dir_dump_lock; /* protects ls_dir_dump_list */
const struct dlm_lockspace_ops *ls_ops;
void *ls_ops_arg;
@@ -686,23 +715,7 @@ struct dlm_ls {
#define LSFL_UEVENT_WAIT 7
#define LSFL_CB_DELAY 9
#define LSFL_NODIR 10
-
-/* much of this is just saving user space pointers associated with the
- lock that we pass back to the user lib with an ast */
-
-struct dlm_user_args {
- struct dlm_user_proc *proc; /* each process that opens the lockspace
- device has private data
- (dlm_user_proc) on the struct file,
- the process's locks point back to it*/
- struct dlm_lksb lksb;
- struct dlm_lksb __user *user_lksb;
- void __user *castparam;
- void __user *castaddr;
- void __user *bastparam;
- void __user *bastaddr;
- uint64_t xid;
-};
+#define LSFL_RECV_MSG_BLOCKED 11
#define DLM_PROC_FLAGS_CLOSING 1
#define DLM_PROC_FLAGS_COMPAT 2
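
Many locks in this series move to the _bh variants because the new toss timer runs in softirq context and touches the same data. The asymmetry visible later in dlm_rsb_toss_timer(), sketched in miniature (illustrative names):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_process_context(void)
{
	spin_lock_bh(&demo_lock);	/* keep the local timer callback out */
	/* ... touch state shared with the timer ... */
	spin_unlock_bh(&demo_lock);
}

static void demo_timer_fn(struct timer_list *t)
{
	spin_lock(&demo_lock);		/* already running in BH context */
	/* ... */
	spin_unlock(&demo_lock);
}
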
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index fd752dd03896..f103b8c30592 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -201,7 +201,7 @@ void dlm_dump_rsb(struct dlm_rsb *r)
/* Threads cannot use the lockspace while it's being recovered */
-static inline void dlm_lock_recovery(struct dlm_ls *ls)
+void dlm_lock_recovery(struct dlm_ls *ls)
{
down_read(&ls->ls_in_recovery);
}
@@ -320,11 +320,18 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
* Basic operations on rsb's and lkb's
*/
+static inline unsigned long rsb_toss_jiffies(void)
+{
+ return jiffies + (READ_ONCE(dlm_config.ci_toss_secs) * HZ);
+}
+
/* This is only called to add a reference when the code already holds
a valid reference to the rsb, so there's no need for locking. */
static inline void hold_rsb(struct dlm_rsb *r)
{
+ /* rsbs in toss state never get referenced */
+ WARN_ON(rsb_flag(r, RSB_TOSS));
kref_get(&r->res_ref);
}
@@ -333,19 +340,48 @@ void dlm_hold_rsb(struct dlm_rsb *r)
hold_rsb(r);
}
+/* TODO move this to lib/refcount.c */
+static __must_check bool
+dlm_refcount_dec_and_write_lock_bh(refcount_t *r, rwlock_t *lock)
+__cond_acquires(lock)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ write_lock_bh(lock);
+ if (!refcount_dec_and_test(r)) {
+ write_unlock_bh(lock);
+ return false;
+ }
+
+ return true;
+}
+
+/* TODO move this to include/linux/kref.h */
+static inline int dlm_kref_put_write_lock_bh(struct kref *kref,
+ void (*release)(struct kref *kref),
+ rwlock_t *lock)
+{
+ if (dlm_refcount_dec_and_write_lock_bh(&kref->refcount, lock)) {
+ release(kref);
+ return 1;
+ }
+
+ return 0;
+}
+
/* When all references to the rsb are gone it's transferred to
the tossed list for later disposal. */
static void put_rsb(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- uint32_t bucket = r->res_bucket;
int rv;
- rv = kref_put_lock(&r->res_ref, toss_rsb,
- &ls->ls_rsbtbl[bucket].lock);
+ rv = dlm_kref_put_write_lock_bh(&r->res_ref, toss_rsb,
+ &ls->ls_rsbtbl_lock);
if (rv)
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
}
void dlm_put_rsb(struct dlm_rsb *r)
@@ -358,17 +394,17 @@ static int pre_rsb_struct(struct dlm_ls *ls)
struct dlm_rsb *r1, *r2;
int count = 0;
- spin_lock(&ls->ls_new_rsb_spin);
+ spin_lock_bh(&ls->ls_new_rsb_spin);
if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
- spin_unlock(&ls->ls_new_rsb_spin);
+ spin_unlock_bh(&ls->ls_new_rsb_spin);
return 0;
}
- spin_unlock(&ls->ls_new_rsb_spin);
+ spin_unlock_bh(&ls->ls_new_rsb_spin);
r1 = dlm_allocate_rsb(ls);
r2 = dlm_allocate_rsb(ls);
- spin_lock(&ls->ls_new_rsb_spin);
+ spin_lock_bh(&ls->ls_new_rsb_spin);
if (r1) {
list_add(&r1->res_hashchain, &ls->ls_new_rsb);
ls->ls_new_rsb_count++;
@@ -378,13 +414,236 @@ static int pre_rsb_struct(struct dlm_ls *ls)
ls->ls_new_rsb_count++;
}
count = ls->ls_new_rsb_count;
- spin_unlock(&ls->ls_new_rsb_spin);
+ spin_unlock_bh(&ls->ls_new_rsb_spin);
if (!count)
return -ENOMEM;
return 0;
}
+/* Paired with timer_delete_sync() in dlm_ls_stop(): stop arming
+ * new timers when recovery is triggered, and don't run them
+ * again until dlm_timer_resume() rearms them.
+ */
+static void __rsb_mod_timer(struct dlm_ls *ls, unsigned long jiffies)
+{
+ if (!dlm_locking_stopped(ls))
+ mod_timer(&ls->ls_timer, jiffies);
+}
+
+/* This function tries to resume the timer callback if an rsb
+ * is on the toss list and no timer is pending. It might be that
+ * the first entry is currently being executed as the timer
+ * callback, but we don't care if a timer is queued up again and
+ * then does nothing. That should be a rare case.
+ */
+void dlm_timer_resume(struct dlm_ls *ls)
+{
+ struct dlm_rsb *r;
+
+ spin_lock_bh(&ls->ls_toss_q_lock);
+ r = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb,
+ res_toss_q_list);
+ if (r && !timer_pending(&ls->ls_timer))
+ __rsb_mod_timer(ls, r->res_toss_time);
+ spin_unlock_bh(&ls->ls_toss_q_lock);
+}
+
+/* ls_rsbtbl_lock must be held and being sure the rsb is in toss state */
+static void rsb_delete_toss_timer(struct dlm_ls *ls, struct dlm_rsb *r)
+{
+ struct dlm_rsb *first;
+
+ spin_lock_bh(&ls->ls_toss_q_lock);
+ r->res_toss_time = 0;
+
+ /* if the rsb is not queued do nothing */
+ if (list_empty(&r->res_toss_q_list))
+ goto out;
+
+ /* get the first element before delete */
+ first = list_first_entry(&ls->ls_toss_q, struct dlm_rsb,
+ res_toss_q_list);
+ list_del_init(&r->res_toss_q_list);
+ /* check if the first element was the rsb we deleted */
+ if (first == r) {
+ /* try to get the new first element, if the list
+ * is empty now try to delete the timer, if we are
+ * too late we don't care.
+ *
+ * if the list isn't empty and a new first element got
+ * in place, set the new timer expire time.
+ */
+ first = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb,
+ res_toss_q_list);
+ if (!first)
+ timer_delete(&ls->ls_timer);
+ else
+ __rsb_mod_timer(ls, first->res_toss_time);
+ }
+
+out:
+ spin_unlock_bh(&ls->ls_toss_q_lock);
+}
+
+/* Caller must hold ls_rsbtbl_lock. This needs to be called every
+ * time the rsb enters toss state or the toss state changes the
+ * dir/master nodeid.
+ */
+static void rsb_mod_timer(struct dlm_ls *ls, struct dlm_rsb *r)
+{
+ int our_nodeid = dlm_our_nodeid();
+ struct dlm_rsb *first;
+
+	/* If we're the directory record for this rsb, and
+	 * we're not the master of it, then we need to wait
+	 * for the master node to send us a dir remove
+	 * before removing the dir record.
+	 */
+ if (!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) == our_nodeid)) {
+ rsb_delete_toss_timer(ls, r);
+ return;
+ }
+
+ spin_lock_bh(&ls->ls_toss_q_lock);
+	/* set the rsb's new absolute expire time */
+ r->res_toss_time = rsb_toss_jiffies();
+ if (list_empty(&ls->ls_toss_q)) {
+		/* if the queue is empty, add the element; its expire
+		 * time is our new timer expiry
+		 */
+ list_add_tail(&r->res_toss_q_list, &ls->ls_toss_q);
+ __rsb_mod_timer(ls, r->res_toss_time);
+ } else {
+ /* check if the rsb was already queued, if so delete
+ * it from the toss queue
+ */
+ if (!list_empty(&r->res_toss_q_list))
+ list_del(&r->res_toss_q_list);
+
+		/* look up the possibly new first element, then append
+		 * this rsb, which now has the latest expire time, to
+		 * the end of the queue. If the queue became empty,
+		 * this rsb's expire time is the next expiration;
+		 * otherwise the first element's expire time is used.
+		 */
+ first = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb,
+ res_toss_q_list);
+ list_add_tail(&r->res_toss_q_list, &ls->ls_toss_q);
+ if (!first)
+ __rsb_mod_timer(ls, r->res_toss_time);
+ else
+ __rsb_mod_timer(ls, first->res_toss_time);
+ }
+ spin_unlock_bh(&ls->ls_toss_q_lock);
+}
+
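
rsb_toss_jiffies(), used above, is defined elsewhere in this patch; judging by the expiry test in the removed shrink_bucket() further down (res_toss_time + dlm_config.ci_toss_secs * HZ), it presumably computes the absolute expiry as:

/* sketch: assumed definition of rsb_toss_jiffies(), derived from the
 * old shrink_bucket() expiry test removed later in this patch */
#define rsb_toss_jiffies() (jiffies + (READ_ONCE(dlm_config.ci_toss_secs) * HZ))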
+/* If we hit contention we retry the trylock after 250 ms.
+ * We don't care if another mod_timer() in between makes the
+ * timer expire earlier; this is only for the unlikely case
+ * that nothing happened in that time.
+ */
+#define DLM_TOSS_TIMER_RETRY (jiffies + msecs_to_jiffies(250))
+
+void dlm_rsb_toss_timer(struct timer_list *timer)
+{
+ struct dlm_ls *ls = from_timer(ls, timer, ls_timer);
+ int our_nodeid = dlm_our_nodeid();
+ struct dlm_rsb *r;
+ int rv;
+
+ while (1) {
+		/* interruption point to leave the iteration when
+		 * recovery waits for timer_delete_sync(); recovery
+		 * will take care of deleting everything on the toss
+		 * queue.
+		 */
+ if (dlm_locking_stopped(ls))
+ break;
+
+ rv = spin_trylock(&ls->ls_toss_q_lock);
+ if (!rv) {
+			/* rearm the retry timer */
+ __rsb_mod_timer(ls, DLM_TOSS_TIMER_RETRY);
+ break;
+ }
+
+ r = list_first_entry_or_null(&ls->ls_toss_q, struct dlm_rsb,
+ res_toss_q_list);
+ if (!r) {
+			/* nothing to do anymore; the next rsb that
+			 * gets queued will set the next mod_timer()
+			 * expiry.
+			 */
+ spin_unlock(&ls->ls_toss_q_lock);
+ break;
+ }
+
+		/* if the first rsb isn't expired yet, stop freeing
+		 * rsbs from the toss queue; the queue is ordered by
+		 * ascending absolute res_toss_time jiffies
+		 */
+ if (time_before(jiffies, r->res_toss_time)) {
+ /* rearm with the next rsb to expire in the future */
+ __rsb_mod_timer(ls, r->res_toss_time);
+ spin_unlock(&ls->ls_toss_q_lock);
+ break;
+ }
+
+		/* find_rsb_dir/nodir take these locks in the reverse
+		 * order; since this is only a trylock, we simply try
+		 * again if we hit contention.
+		 *
+		 * Taking this lock while holding ls_toss_q_lock ensures
+		 * that rsb_delete_toss_timer() and rsb_mod_timer()
+		 * cannot run after this timer callback deletes the rsb
+		 * from ls_toss_q. The other lock holders always have
+		 * priority to run, as this is only cache handling and
+		 * they might take the rsb out of toss state.
+		 */
+ rv = write_trylock(&ls->ls_rsbtbl_lock);
+ if (!rv) {
+ spin_unlock(&ls->ls_toss_q_lock);
+			/* rearm the retry timer */
+ __rsb_mod_timer(ls, DLM_TOSS_TIMER_RETRY);
+ break;
+ }
+
+ list_del(&r->res_rsbs_list);
+ rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
+ dlm_rhash_rsb_params);
+
+		/* not necessary to hold ls_rsbtbl_lock when
+		 * calling send_remove()
+		 */
+ write_unlock(&ls->ls_rsbtbl_lock);
+
+		/* remove the rsb from the toss queue; it's gone
+		 * from DLM now
+		 */
+ list_del_init(&r->res_toss_q_list);
+ spin_unlock(&ls->ls_toss_q_lock);
+
+ /* no rsb in this state should ever run a timer */
+ WARN_ON(!dlm_no_directory(ls) &&
+ (r->res_master_nodeid != our_nodeid) &&
+ (dlm_dir_nodeid(r) == our_nodeid));
+
+ /* We're the master of this rsb but we're not
+ * the directory record, so we need to tell the
+ * dir node to remove the dir record
+ */
+ if (!dlm_no_directory(ls) &&
+ (r->res_master_nodeid == our_nodeid) &&
+ (dlm_dir_nodeid(r) != our_nodeid))
+ send_remove(r);
+
+ free_toss_rsb(r);
+ }
+}
+
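
For reference, the from_timer() call at the top of dlm_rsb_toss_timer() is just container_of() specialized for timers; it recovers the lockspace that owns the firing timer:

/* equivalent expansion of the from_timer() call above */
struct dlm_ls *ls = container_of(timer, struct dlm_ls, ls_timer);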
/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
unlock any spinlocks, go back and call pre_rsb_struct again.
Otherwise, take an rsb off the list and return it. */
@@ -395,10 +654,10 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
struct dlm_rsb *r;
int count;
- spin_lock(&ls->ls_new_rsb_spin);
+ spin_lock_bh(&ls->ls_new_rsb_spin);
if (list_empty(&ls->ls_new_rsb)) {
count = ls->ls_new_rsb_count;
- spin_unlock(&ls->ls_new_rsb_spin);
+ spin_unlock_bh(&ls->ls_new_rsb_spin);
log_debug(ls, "find_rsb retry %d %d %s",
count, dlm_config.ci_new_rsb_count,
(const char *)name);
@@ -407,88 +666,44 @@ static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
list_del(&r->res_hashchain);
- /* Convert the empty list_head to a NULL rb_node for tree usage: */
- memset(&r->res_hashnode, 0, sizeof(struct rb_node));
ls->ls_new_rsb_count--;
- spin_unlock(&ls->ls_new_rsb_spin);
+ spin_unlock_bh(&ls->ls_new_rsb_spin);
r->res_ls = ls;
r->res_length = len;
memcpy(r->res_name, name, len);
- mutex_init(&r->res_mutex);
+ spin_lock_init(&r->res_lock);
INIT_LIST_HEAD(&r->res_lookup);
INIT_LIST_HEAD(&r->res_grantqueue);
INIT_LIST_HEAD(&r->res_convertqueue);
INIT_LIST_HEAD(&r->res_waitqueue);
INIT_LIST_HEAD(&r->res_root_list);
+ INIT_LIST_HEAD(&r->res_toss_q_list);
INIT_LIST_HEAD(&r->res_recover_list);
+ INIT_LIST_HEAD(&r->res_masters_list);
*r_ret = r;
return 0;
}
-static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
+int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len,
+ struct dlm_rsb **r_ret)
{
- char maxname[DLM_RESNAME_MAXLEN];
+ char key[DLM_RESNAME_MAXLEN] = {};
- memset(maxname, 0, DLM_RESNAME_MAXLEN);
- memcpy(maxname, name, nlen);
- return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
-}
+ memcpy(key, name, len);
+ *r_ret = rhashtable_lookup_fast(rhash, &key, dlm_rhash_rsb_params);
+ if (*r_ret)
+ return 0;
-int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len,
- struct dlm_rsb **r_ret)
-{
- struct rb_node *node = tree->rb_node;
- struct dlm_rsb *r;
- int rc;
-
- while (node) {
- r = rb_entry(node, struct dlm_rsb, res_hashnode);
- rc = rsb_cmp(r, name, len);
- if (rc < 0)
- node = node->rb_left;
- else if (rc > 0)
- node = node->rb_right;
- else
- goto found;
- }
- *r_ret = NULL;
return -EBADR;
-
- found:
- *r_ret = r;
- return 0;
}
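
The lookup above keys on the zero-padded resource name; dlm_rhash_rsb_params is defined elsewhere in this patch, and a plausible sketch (field values are assumptions) matching that scheme is:

/* sketch: assumed dlm_rhash_rsb_params, matching the padded-key lookup
 * above and the res_node linkage used by rsb_insert() below */
static const struct rhashtable_params dlm_rhash_rsb_params = {
	.nelem_hint = 3,
	.key_len = DLM_RESNAME_MAXLEN,
	.key_offset = offsetof(struct dlm_rsb, res_name),
	.head_offset = offsetof(struct dlm_rsb, res_node),
	.automatic_shrinking = true,
};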
-static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
+static int rsb_insert(struct dlm_rsb *rsb, struct rhashtable *rhash)
{
- struct rb_node **newn = &tree->rb_node;
- struct rb_node *parent = NULL;
- int rc;
-
- while (*newn) {
- struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
- res_hashnode);
-
- parent = *newn;
- rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
- if (rc < 0)
- newn = &parent->rb_left;
- else if (rc > 0)
- newn = &parent->rb_right;
- else {
- log_print("rsb_insert match");
- dlm_dump_rsb(rsb);
- dlm_dump_rsb(cur);
- return -EEXIST;
- }
- }
-
- rb_link_node(&rsb->res_hashnode, parent, newn);
- rb_insert_color(&rsb->res_hashnode, tree);
- return 0;
+ return rhashtable_insert_fast(rhash, &rsb->res_node,
+ dlm_rhash_rsb_params);
}
/*
@@ -536,8 +751,7 @@ static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
*/
static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
- uint32_t hash, uint32_t b,
- int dir_nodeid, int from_nodeid,
+ uint32_t hash, int dir_nodeid, int from_nodeid,
unsigned int flags, struct dlm_rsb **r_ret)
{
struct dlm_rsb *r = NULL;
@@ -584,24 +798,46 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
goto out;
}
- spin_lock(&ls->ls_rsbtbl[b].lock);
+ retry_lookup:
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
- if (error)
- goto do_toss;
+ /* check if the rsb is in keep state under read lock - likely path */
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (error) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto do_new;
+ }
/*
* rsb is active, so we can't check master_nodeid without lock_rsb.
*/
+ if (rsb_flag(r, RSB_TOSS)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto do_toss;
+ }
+
kref_get(&r->res_ref);
- goto out_unlock;
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto out;
do_toss:
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (error)
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+
+	/* retry the lookup under write lock to see if it's still in toss
+	 * state; if not, it's in keep state and we relookup - unlikely path.
+	 */
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (!error) {
+ if (!rsb_flag(r, RSB_TOSS)) {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto retry_lookup;
+ }
+ } else {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
goto do_new;
+ }
/*
* rsb found inactive (master_nodeid may be out of date unless
@@ -616,8 +852,9 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
from_nodeid, r->res_master_nodeid, dir_nodeid,
r->res_name);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
error = -ENOTBLK;
- goto out_unlock;
+ goto out;
}
if ((r->res_master_nodeid != our_nodeid) && from_dir) {
@@ -639,9 +876,17 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
r->res_first_lkid = 0;
}
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
- goto out_unlock;
+ list_move(&r->res_rsbs_list, &ls->ls_keep);
+ rsb_clear_flag(r, RSB_TOSS);
+	/* the rsb got out of toss state; it becomes alive again,
+	 * so we reinit the reference counter, which is only valid
+	 * for keep-state rsbs
+	 */
+ kref_init(&r->res_ref);
+ rsb_delete_toss_timer(ls, r);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
+ goto out;
do_new:
@@ -650,18 +895,15 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
*/
if (error == -EBADR && !create)
- goto out_unlock;
+ goto out;
error = get_rsb_struct(ls, name, len, &r);
- if (error == -EAGAIN) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ if (error == -EAGAIN)
goto retry;
- }
if (error)
- goto out_unlock;
+ goto out;
r->res_hash = hash;
- r->res_bucket = b;
r->res_dir_nodeid = dir_nodeid;
kref_init(&r->res_ref);
@@ -681,7 +923,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
dlm_free_rsb(r);
r = NULL;
error = -ENOTBLK;
- goto out_unlock;
+ goto out;
}
if (from_other) {
@@ -701,9 +943,20 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
}
out_add:
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
- out_unlock:
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ error = rsb_insert(r, &ls->ls_rsbtbl);
+ if (error == -EEXIST) {
+		/* somebody else was faster and the rsb exists
+		 * now; do a complete relookup
+		 */
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ dlm_free_rsb(r);
+ goto retry_lookup;
+ } else if (!error) {
+ list_add(&r->res_rsbs_list, &ls->ls_keep);
+ }
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
out:
*r_ret = r;
return error;
@@ -714,8 +967,7 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
dlm_recover_masters). */
static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
- uint32_t hash, uint32_t b,
- int dir_nodeid, int from_nodeid,
+ uint32_t hash, int dir_nodeid, int from_nodeid,
unsigned int flags, struct dlm_rsb **r_ret)
{
struct dlm_rsb *r = NULL;
@@ -728,24 +980,48 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
if (error < 0)
goto out;
- spin_lock(&ls->ls_rsbtbl[b].lock);
+ retry_lookup:
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
- if (error)
+ /* check if the rsb is in keep state under read lock - likely path */
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (error) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto do_new;
+ }
+
+ if (rsb_flag(r, RSB_TOSS)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
goto do_toss;
+ }
/*
* rsb is active, so we can't check master_nodeid without lock_rsb.
*/
kref_get(&r->res_ref);
- goto out_unlock;
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+
+ goto out;
do_toss:
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (error)
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+
+	/* retry the lookup under write lock to see if it's still in toss
+	 * state; if not, it's in keep state and we relookup - unlikely path.
+	 */
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (!error) {
+ if (!rsb_flag(r, RSB_TOSS)) {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto retry_lookup;
+ }
+ } else {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
goto do_new;
+ }
+
/*
* rsb found inactive. No other thread is using this rsb because
@@ -759,8 +1035,9 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
from_nodeid, r->res_master_nodeid, dir_nodeid);
dlm_print_rsb(r);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
error = -ENOTBLK;
- goto out_unlock;
+ goto out;
}
if (!recover && (r->res_master_nodeid != our_nodeid) &&
@@ -774,9 +1051,17 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
r->res_nodeid = 0;
}
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
- goto out_unlock;
+ list_move(&r->res_rsbs_list, &ls->ls_keep);
+ rsb_clear_flag(r, RSB_TOSS);
+	/* the rsb got out of toss state; it becomes alive again,
+	 * so we reinit the reference counter, which is only valid
+	 * for keep-state rsbs
+	 */
+ kref_init(&r->res_ref);
+ rsb_delete_toss_timer(ls, r);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
+ goto out;
do_new:
@@ -786,22 +1071,31 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
error = get_rsb_struct(ls, name, len, &r);
if (error == -EAGAIN) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
goto retry;
}
if (error)
- goto out_unlock;
+ goto out;
r->res_hash = hash;
- r->res_bucket = b;
r->res_dir_nodeid = dir_nodeid;
r->res_master_nodeid = dir_nodeid;
r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
kref_init(&r->res_ref);
- error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
- out_unlock:
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ error = rsb_insert(r, &ls->ls_rsbtbl);
+ if (error == -EEXIST) {
+		/* somebody else was faster and the rsb exists
+		 * now; do a complete relookup
+		 */
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ dlm_free_rsb(r);
+ goto retry_lookup;
+ } else if (!error) {
+ list_add(&r->res_rsbs_list, &ls->ls_keep);
+ }
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
out:
*r_ret = r;
return error;
@@ -811,23 +1105,21 @@ static int find_rsb(struct dlm_ls *ls, const void *name, int len,
int from_nodeid, unsigned int flags,
struct dlm_rsb **r_ret)
{
- uint32_t hash, b;
int dir_nodeid;
+ uint32_t hash;
if (len > DLM_RESNAME_MAXLEN)
return -EINVAL;
hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
-
dir_nodeid = dlm_hash2nodeid(ls, hash);
if (dlm_no_directory(ls))
- return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
+ return find_rsb_nodir(ls, name, len, hash, dir_nodeid,
from_nodeid, flags, r_ret);
else
- return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
- from_nodeid, flags, r_ret);
+ return find_rsb_dir(ls, name, len, hash, dir_nodeid,
+ from_nodeid, flags, r_ret);
}
/* we have received a request and found that res_master_nodeid != our_nodeid,
@@ -988,7 +1280,7 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
int len, unsigned int flags, int *r_nodeid, int *result)
{
struct dlm_rsb *r = NULL;
- uint32_t hash, b;
+ uint32_t hash;
int our_nodeid = dlm_our_nodeid();
int dir_nodeid, error;
@@ -1002,8 +1294,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
}
hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
-
dir_nodeid = dlm_hash2nodeid(ls, hash);
if (dir_nodeid != our_nodeid) {
log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
@@ -1018,15 +1308,23 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
if (error < 0)
return error;
- spin_lock(&ls->ls_rsbtbl[b].lock);
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ retry_lookup:
+
+ /* check if the rsb is in keep state under read lock - likely path */
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (!error) {
+ if (rsb_flag(r, RSB_TOSS)) {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto do_toss;
+ }
+
/* because the rsb is active, we need to lock_rsb before
* checking/changing re_master_nodeid
*/
hold_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
lock_rsb(r);
__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
@@ -1037,11 +1335,31 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
put_rsb(r);
return 0;
+ } else {
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ goto not_found;
}
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (error)
+ do_toss:
+ /* unlikely path - relookup under write */
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+
+	/* rsb_mod_timer() requires ls_rsbtbl_lock to be held for writing;
+	 * check if the rsb is still in toss state, if not relookup
+	 */
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
+ if (!error) {
+ if (!rsb_flag(r, RSB_TOSS)) {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+			/* something has changed, very unlikely but
+			 * try again
+			 */
+ goto retry_lookup;
+ }
+ } else {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
goto not_found;
+ }
/* because the rsb is inactive (on toss list), it's not refcounted
* and lock_rsb is not used, but is protected by the rsbtbl lock
@@ -1050,83 +1368,78 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
r_nodeid, result);
- r->res_toss_time = jiffies;
+ rsb_mod_timer(ls, r);
/* the rsb was inactive (on toss list) */
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
return 0;
not_found:
error = get_rsb_struct(ls, name, len, &r);
- if (error == -EAGAIN) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ if (error == -EAGAIN)
goto retry;
- }
if (error)
- goto out_unlock;
+ goto out;
r->res_hash = hash;
- r->res_bucket = b;
r->res_dir_nodeid = our_nodeid;
r->res_master_nodeid = from_nodeid;
r->res_nodeid = from_nodeid;
kref_init(&r->res_ref);
- r->res_toss_time = jiffies;
+ rsb_set_flag(r, RSB_TOSS);
- error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
- if (error) {
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ error = rsb_insert(r, &ls->ls_rsbtbl);
+ if (error == -EEXIST) {
+		/* somebody else was faster and the rsb exists
+		 * now; do a complete relookup
+		 */
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ dlm_free_rsb(r);
+ goto retry_lookup;
+ } else if (error) {
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
/* should never happen */
dlm_free_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
goto retry;
}
+ list_add(&r->res_rsbs_list, &ls->ls_toss);
+ rsb_mod_timer(ls, r);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
if (result)
*result = DLM_LU_ADD;
*r_nodeid = from_nodeid;
- out_unlock:
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ out:
return error;
}
static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
- struct rb_node *n;
struct dlm_rsb *r;
- int i;
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- spin_lock(&ls->ls_rsbtbl[i].lock);
- for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
- if (r->res_hash == hash)
- dlm_dump_rsb(r);
- }
- spin_unlock(&ls->ls_rsbtbl[i].lock);
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+ if (r->res_hash == hash)
+ dlm_dump_rsb(r);
}
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
}
void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len)
{
struct dlm_rsb *r = NULL;
- uint32_t hash, b;
int error;
- hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (!error)
- goto out_dump;
-
- error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (error)
goto out;
- out_dump:
+
dlm_dump_rsb(r);
out:
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
}
static void toss_rsb(struct kref *kref)
@@ -1135,11 +1448,10 @@ static void toss_rsb(struct kref *kref)
struct dlm_ls *ls = r->res_ls;
DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
- kref_init(&r->res_ref);
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
- rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
- r->res_toss_time = jiffies;
- set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[r->res_bucket].flags);
+ rsb_set_flag(r, RSB_TOSS);
+ list_move(&r->res_rsbs_list, &ls->ls_toss);
+ rsb_mod_timer(ls, r);
+
if (r->res_lvbptr) {
dlm_free_lvb(r->res_lvbptr);
r->res_lvbptr = NULL;
@@ -1151,23 +1463,27 @@ static void toss_rsb(struct kref *kref)
static void unhold_rsb(struct dlm_rsb *r)
{
int rv;
+
+ /* rsbs in toss state never get referenced */
+ WARN_ON(rsb_flag(r, RSB_TOSS));
rv = kref_put(&r->res_ref, toss_rsb);
DLM_ASSERT(!rv, dlm_dump_rsb(r););
}
-static void kill_rsb(struct kref *kref)
+void free_toss_rsb(struct dlm_rsb *r)
{
- struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
-
- /* All work is done after the return from kref_put() so we
- can release the write_lock before the remove and free. */
+ WARN_ON_ONCE(!rsb_flag(r, RSB_TOSS));
DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
+ DLM_ASSERT(list_empty(&r->res_toss_q_list), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
+ DLM_ASSERT(list_empty(&r->res_masters_list), dlm_dump_rsb(r););
+
+ dlm_free_rsb(r);
}
/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
@@ -1197,24 +1513,20 @@ static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
if (!lkb)
return -ENOMEM;
- lkb->lkb_last_bast_mode = -1;
+ lkb->lkb_last_bast_cb_mode = DLM_LOCK_IV;
+ lkb->lkb_last_cast_cb_mode = DLM_LOCK_IV;
+ lkb->lkb_last_cb_mode = DLM_LOCK_IV;
lkb->lkb_nodeid = -1;
lkb->lkb_grmode = DLM_LOCK_IV;
kref_init(&lkb->lkb_ref);
INIT_LIST_HEAD(&lkb->lkb_ownqueue);
INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
- INIT_LIST_HEAD(&lkb->lkb_cb_list);
- INIT_LIST_HEAD(&lkb->lkb_callbacks);
- spin_lock_init(&lkb->lkb_cb_lock);
- INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
- idr_preload(GFP_NOFS);
- spin_lock(&ls->ls_lkbidr_spin);
+ write_lock_bh(&ls->ls_lkbidr_lock);
rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
if (rv >= 0)
lkb->lkb_id = rv;
- spin_unlock(&ls->ls_lkbidr_spin);
- idr_preload_end();
+ write_unlock_bh(&ls->ls_lkbidr_lock);
if (rv < 0) {
log_error(ls, "create_lkb idr error %d", rv);
@@ -1235,11 +1547,11 @@ static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
struct dlm_lkb *lkb;
- spin_lock(&ls->ls_lkbidr_spin);
+ read_lock_bh(&ls->ls_lkbidr_lock);
lkb = idr_find(&ls->ls_lkbidr, lkid);
if (lkb)
kref_get(&lkb->lkb_ref);
- spin_unlock(&ls->ls_lkbidr_spin);
+ read_unlock_bh(&ls->ls_lkbidr_lock);
*lkb_ret = lkb;
return lkb ? 0 : -ENOENT;
@@ -1263,11 +1575,11 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
uint32_t lkid = lkb->lkb_id;
int rv;
- rv = kref_put_lock(&lkb->lkb_ref, kill_lkb,
- &ls->ls_lkbidr_spin);
+ rv = dlm_kref_put_write_lock_bh(&lkb->lkb_ref, kill_lkb,
+ &ls->ls_lkbidr_lock);
if (rv) {
idr_remove(&ls->ls_lkbidr, lkid);
- spin_unlock(&ls->ls_lkbidr_spin);
+ write_unlock_bh(&ls->ls_lkbidr_lock);
detach_lkb(lkb);
@@ -1408,7 +1720,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error = 0;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock_bh(&ls->ls_waiters_lock);
if (is_overlap_unlock(lkb) ||
(is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
@@ -1451,7 +1763,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
log_error(ls, "addwait error %x %d flags %x %d %d %s",
lkb->lkb_id, error, dlm_iflags_val(lkb), mstype,
lkb->lkb_wait_type, lkb->lkb_resource->res_name);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
return error;
}
@@ -1551,14 +1863,18 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock_bh(&ls->ls_waiters_lock);
error = _remove_from_waiters(lkb, mstype, NULL);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
return error;
}
/* Handles situations where we might be processing a "fake" or "local" reply in
- which we can't try to take waiters_mutex again. */
+ * the recovery context, which stops any locking activity. Only debugfs might
+ * change the lockspace waiters, but it will hold the recovery lock to ensure
+ * that remove_from_waiters_ms() in the local case is the only user manipulating
+ * the lockspace waiters in recovery context.
+ */
static int remove_from_waiters_ms(struct dlm_lkb *lkb,
const struct dlm_message *ms, bool local)
@@ -1567,159 +1883,16 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb,
int error;
if (!local)
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock_bh(&ls->ls_waiters_lock);
+ else
+ WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) ||
+ !dlm_locking_stopped(ls));
error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
if (!local)
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
return error;
}
-static void shrink_bucket(struct dlm_ls *ls, int b)
-{
- struct rb_node *n, *next;
- struct dlm_rsb *r;
- char *name;
- int our_nodeid = dlm_our_nodeid();
- int remote_count = 0;
- int need_shrink = 0;
- int i, len, rv;
-
- memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
-
- if (!test_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags)) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- return;
- }
-
- for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
- next = rb_next(n);
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
-
- /* If we're the directory record for this rsb, and
- we're not the master of it, then we need to wait
- for the master node to send us a dir remove for
- before removing the dir record. */
-
- if (!dlm_no_directory(ls) &&
- (r->res_master_nodeid != our_nodeid) &&
- (dlm_dir_nodeid(r) == our_nodeid)) {
- continue;
- }
-
- need_shrink = 1;
-
- if (!time_after_eq(jiffies, r->res_toss_time +
- dlm_config.ci_toss_secs * HZ)) {
- continue;
- }
-
- if (!dlm_no_directory(ls) &&
- (r->res_master_nodeid == our_nodeid) &&
- (dlm_dir_nodeid(r) != our_nodeid)) {
-
- /* We're the master of this rsb but we're not
- the directory record, so we need to tell the
- dir node to remove the dir record. */
-
- ls->ls_remove_lens[remote_count] = r->res_length;
- memcpy(ls->ls_remove_names[remote_count], r->res_name,
- DLM_RESNAME_MAXLEN);
- remote_count++;
-
- if (remote_count >= DLM_REMOVE_NAMES_MAX)
- break;
- continue;
- }
-
- if (!kref_put(&r->res_ref, kill_rsb)) {
- log_error(ls, "tossed rsb in use %s", r->res_name);
- continue;
- }
-
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- dlm_free_rsb(r);
- }
-
- if (need_shrink)
- set_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags);
- else
- clear_bit(DLM_RTF_SHRINK_BIT, &ls->ls_rsbtbl[b].flags);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
-
- /*
- * While searching for rsb's to free, we found some that require
- * remote removal. We leave them in place and find them again here
- * so there is a very small gap between removing them from the toss
- * list and sending the removal. Keeping this gap small is
- * important to keep us (the master node) from being out of sync
- * with the remote dir node for very long.
- */
-
- for (i = 0; i < remote_count; i++) {
- name = ls->ls_remove_names[i];
- len = ls->ls_remove_lens[i];
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
- if (rv) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_debug(ls, "remove_name not toss %s", name);
- continue;
- }
-
- if (r->res_master_nodeid != our_nodeid) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_debug(ls, "remove_name master %d dir %d our %d %s",
- r->res_master_nodeid, r->res_dir_nodeid,
- our_nodeid, name);
- continue;
- }
-
- if (r->res_dir_nodeid == our_nodeid) {
- /* should never happen */
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_error(ls, "remove_name dir %d master %d our %d %s",
- r->res_dir_nodeid, r->res_master_nodeid,
- our_nodeid, name);
- continue;
- }
-
- if (!time_after_eq(jiffies, r->res_toss_time +
- dlm_config.ci_toss_secs * HZ)) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_debug(ls, "remove_name toss_time %lu now %lu %s",
- r->res_toss_time, jiffies, name);
- continue;
- }
-
- if (!kref_put(&r->res_ref, kill_rsb)) {
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- log_error(ls, "remove_name in use %s", name);
- continue;
- }
-
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- send_remove(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
-
- dlm_free_rsb(r);
- }
-}
-
-void dlm_scan_rsbs(struct dlm_ls *ls)
-{
- int i;
-
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- shrink_bucket(ls, i);
- if (dlm_locking_stopped(ls))
- break;
- cond_resched();
- }
-}
-
/* lkb is master or local copy */
static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -2538,7 +2711,6 @@ static void process_lookup_list(struct dlm_rsb *r)
list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
list_del_init(&lkb->lkb_rsb_lookup);
_request_lock(r, lkb);
- schedule();
}
}
@@ -3332,8 +3504,7 @@ int dlm_unlock(dlm_lockspace_t *lockspace,
static int _create_message(struct dlm_ls *ls, int mb_len,
int to_nodeid, int mstype,
struct dlm_message **ms_ret,
- struct dlm_mhandle **mh_ret,
- gfp_t allocation)
+ struct dlm_mhandle **mh_ret)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
@@ -3343,7 +3514,7 @@ static int _create_message(struct dlm_ls *ls, int mb_len,
pass into midcomms_commit and a message buffer (mb) that we
write our data into */
- mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, allocation, &mb);
+ mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, &mb);
if (!mh)
return -ENOBUFS;
@@ -3365,8 +3536,7 @@ static int _create_message(struct dlm_ls *ls, int mb_len,
static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
int to_nodeid, int mstype,
struct dlm_message **ms_ret,
- struct dlm_mhandle **mh_ret,
- gfp_t allocation)
+ struct dlm_mhandle **mh_ret)
{
int mb_len = sizeof(struct dlm_message);
@@ -3387,7 +3557,7 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
}
return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
- ms_ret, mh_ret, allocation);
+ ms_ret, mh_ret);
}
/* further lowcomms enhancements or alternate implementations may make
@@ -3456,7 +3626,7 @@ static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
if (error)
return error;
- error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
+ error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
if (error)
goto fail;
@@ -3516,8 +3686,7 @@ static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
to_nodeid = lkb->lkb_nodeid;
- error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh,
- GFP_NOFS);
+ error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
if (error)
goto out;
@@ -3538,8 +3707,7 @@ static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
to_nodeid = lkb->lkb_nodeid;
- error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh,
- GFP_NOFS);
+ error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
if (error)
goto out;
@@ -3564,8 +3732,7 @@ static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
if (error)
return error;
- error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh,
- GFP_NOFS);
+ error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
if (error)
goto fail;
@@ -3589,8 +3756,7 @@ static int send_remove(struct dlm_rsb *r)
to_nodeid = dlm_dir_nodeid(r);
- error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh,
- GFP_ATOMIC);
+ error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
if (error)
goto out;
@@ -3611,7 +3777,7 @@ static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
to_nodeid = lkb->lkb_nodeid;
- error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
+ error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
if (error)
goto out;
@@ -3653,8 +3819,7 @@ static int send_lookup_reply(struct dlm_ls *ls,
struct dlm_mhandle *mh;
int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid);
- error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh,
- GFP_NOFS);
+ error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
if (error)
goto out;
@@ -4139,7 +4304,6 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
{
char name[DLM_RESNAME_MAXLEN+1];
struct dlm_rsb *r;
- uint32_t hash, b;
int rv, len, dir_nodeid, from_nodeid;
from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
@@ -4159,47 +4323,44 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
return;
}
- /* Look for name on rsbtbl.toss, if it's there, kill it.
- If it's on rsbtbl.keep, it's being used, and we should ignore this
- message. This is an expected race between the dir node sending a
- request to the master node at the same time as the master node sends
- a remove to the dir node. The resolution to that race is for the
- dir node to ignore the remove message, and the master node to
- recreate the master rsb when it gets a request from the dir node for
- an rsb it doesn't have. */
+	/* Look for the name in rsb toss state; if it's there, kill it.
+	 * If it's in non-toss state, it's being used, and we should ignore this
+ * message. This is an expected race between the dir node sending a
+ * request to the master node at the same time as the master node sends
+ * a remove to the dir node. The resolution to that race is for the
+ * dir node to ignore the remove message, and the master node to
+ * recreate the master rsb when it gets a request from the dir node for
+ * an rsb it doesn't have.
+ */
memset(name, 0, sizeof(name));
memcpy(name, ms->m_extra, len);
- hash = jhash(name, len, 0);
- b = hash & (ls->ls_rsbtbl_size - 1);
-
- spin_lock(&ls->ls_rsbtbl[b].lock);
+ write_lock_bh(&ls->ls_rsbtbl_lock);
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
+ rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
if (rv) {
- /* verify the rsb is on keep list per comment above */
- rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
- if (rv) {
- /* should not happen */
- log_error(ls, "receive_remove from %d not found %s",
- from_nodeid, name);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- return;
- }
+ /* should not happen */
+ log_error(ls, "%s from %d not found %s", __func__,
+ from_nodeid, name);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+ return;
+ }
+
+ if (!rsb_flag(r, RSB_TOSS)) {
if (r->res_master_nodeid != from_nodeid) {
/* should not happen */
log_error(ls, "receive_remove keep from %d master %d",
from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
return;
}
log_debug(ls, "receive_remove from %d master %d first %x %s",
from_nodeid, r->res_master_nodeid, r->res_first_lkid,
name);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
return;
}
@@ -4207,20 +4368,16 @@ static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms)
log_error(ls, "receive_remove toss from %d master %d",
from_nodeid, r->res_master_nodeid);
dlm_print_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
return;
}
- if (kref_put(&r->res_ref, kill_rsb)) {
- rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- dlm_free_rsb(r);
- } else {
- log_error(ls, "receive_remove from %d rsb ref error",
- from_nodeid);
- dlm_print_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- }
+ list_del(&r->res_rsbs_list);
+ rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
+ dlm_rhash_rsb_params);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
+
+ free_toss_rsb(r);
}
static void receive_purge(struct dlm_ls *ls, const struct dlm_message *ms)
@@ -4407,7 +4564,6 @@ static void _receive_convert_reply(struct dlm_lkb *lkb,
if (error)
goto out;
- /* local reply can happen with waiters_mutex held */
error = remove_from_waiters_ms(lkb, ms, local);
if (error)
goto out;
@@ -4446,7 +4602,6 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb,
if (error)
goto out;
- /* local reply can happen with waiters_mutex held */
error = remove_from_waiters_ms(lkb, ms, local);
if (error)
goto out;
@@ -4498,7 +4653,6 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb,
if (error)
goto out;
- /* local reply can happen with waiters_mutex held */
error = remove_from_waiters_ms(lkb, ms, local);
if (error)
goto out;
@@ -4757,20 +4911,32 @@ static void _receive_message(struct dlm_ls *ls, const struct dlm_message *ms,
static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms,
int nodeid)
{
- if (dlm_locking_stopped(ls)) {
+try_again:
+ read_lock_bh(&ls->ls_requestqueue_lock);
+ if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
/* If we were a member of this lockspace, left, and rejoined,
other nodes may still be sending us messages from the
lockspace generation before we left. */
if (WARN_ON_ONCE(!ls->ls_generation)) {
+ read_unlock_bh(&ls->ls_requestqueue_lock);
log_limit(ls, "receive %d from %d ignore old gen",
le32_to_cpu(ms->m_type), nodeid);
return;
}
+ read_unlock_bh(&ls->ls_requestqueue_lock);
+ write_lock_bh(&ls->ls_requestqueue_lock);
+		/* recheck because we hold the write lock now */
+ if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) {
+ write_unlock_bh(&ls->ls_requestqueue_lock);
+ goto try_again;
+ }
+
dlm_add_requestqueue(ls, nodeid, ms);
+ write_unlock_bh(&ls->ls_requestqueue_lock);
} else {
- dlm_wait_requestqueue(ls);
_receive_message(ls, ms, 0);
+ read_unlock_bh(&ls->ls_requestqueue_lock);
}
}
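
The same optimistic pattern recurs throughout this patch (see the retry_lookup paths above): check state under the read lock, retake the lock as writer, and recheck before acting, retrying the fast path if the state changed in between. Distilled, with purely illustrative names:

/* sketch of the recheck-under-write-lock idiom used in
 * dlm_receive_message(); all names here are illustrative */
static void example_dispatch(rwlock_t *lock, unsigned long *flags,
			     void (*fast_path)(void), void (*slow_path)(void))
{
try_again:
	read_lock_bh(lock);
	if (!test_bit(0, flags)) {
		fast_path();		/* e.g. _receive_message() */
		read_unlock_bh(lock);
		return;
	}
	read_unlock_bh(lock);

	write_lock_bh(lock);
	/* the bit may have been cleared between the two locks */
	if (!test_bit(0, flags)) {
		write_unlock_bh(lock);
		goto try_again;
	}
	slow_path();			/* e.g. dlm_add_requestqueue() */
	write_unlock_bh(lock);
}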
@@ -4830,7 +4996,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid)
/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
be inactive (in this ls) before transitioning to recovery mode */
- down_read(&ls->ls_recv_active);
+ read_lock_bh(&ls->ls_recv_active);
if (hd->h_cmd == DLM_MSG)
dlm_receive_message(ls, &p->message, nodeid);
else if (hd->h_cmd == DLM_RCOM)
@@ -4838,7 +5004,7 @@ void dlm_receive_buffer(const union dlm_packet *p, int nodeid)
else
log_error(ls, "invalid h_cmd %d from %d lockspace %x",
hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
- up_read(&ls->ls_recv_active);
+ read_unlock_bh(&ls->ls_recv_active);
dlm_put_lockspace(ls);
}
@@ -4899,8 +5065,6 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
if (!ms_local)
return;
- mutex_lock(&ls->ls_waiters_mutex);
-
list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
@@ -4993,7 +5157,6 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
}
schedule();
}
- mutex_unlock(&ls->ls_waiters_mutex);
kfree(ms_local);
}
@@ -5001,7 +5164,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
struct dlm_lkb *lkb = NULL, *iter;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock_bh(&ls->ls_waiters_lock);
list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
hold_lkb(iter);
@@ -5009,7 +5172,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
break;
}
}
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
return lkb;
}
@@ -5109,9 +5272,9 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
}
/* Forcibly remove from waiters list */
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock_bh(&ls->ls_waiters_lock);
list_del_init(&lkb->lkb_wait_reply);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock_bh(&ls->ls_waiters_lock);
/*
* The lkb is now clear of all prior waiters state and can be
@@ -5236,7 +5399,7 @@ static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
/* Get rid of locks held by nodes that are gone. */
-void dlm_recover_purge(struct dlm_ls *ls)
+void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list)
{
struct dlm_rsb *r;
struct dlm_member *memb;
@@ -5255,8 +5418,7 @@ void dlm_recover_purge(struct dlm_ls *ls)
if (!nodes_count)
return;
- down_write(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, root_list, res_root_list) {
hold_rsb(r);
lock_rsb(r);
if (is_master(r)) {
@@ -5271,22 +5433,18 @@ void dlm_recover_purge(struct dlm_ls *ls)
unhold_rsb(r);
cond_resched();
}
- up_write(&ls->ls_root_sem);
if (lkb_count)
log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
lkb_count, nodes_count);
}
-static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
+static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls)
{
- struct rb_node *n;
struct dlm_rsb *r;
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
-
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
if (!rsb_flag(r, RSB_RECOVER_GRANT))
continue;
if (!is_master(r)) {
@@ -5294,10 +5452,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
continue;
}
hold_rsb(r);
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
return r;
}
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
return NULL;
}
@@ -5321,19 +5479,15 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
void dlm_recover_grant(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- int bucket = 0;
unsigned int count = 0;
unsigned int rsb_count = 0;
unsigned int lkb_count = 0;
while (1) {
- r = find_grant_rsb(ls, bucket);
- if (!r) {
- if (bucket == ls->ls_rsbtbl_size - 1)
- break;
- bucket++;
- continue;
- }
+ r = find_grant_rsb(ls);
+ if (!r)
+ break;
+
rsb_count++;
count = 0;
lock_rsb(r);
@@ -5641,10 +5795,10 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
}
/* add this new lkb to the per-process list of locks */
- spin_lock(&ua->proc->locks_spin);
+ spin_lock_bh(&ua->proc->locks_spin);
hold_lkb(lkb);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
- spin_unlock(&ua->proc->locks_spin);
+ spin_unlock_bh(&ua->proc->locks_spin);
do_put = false;
out_put:
trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
@@ -5726,7 +5880,7 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
int found_other_mode = 0;
int rv = 0;
- mutex_lock(&ls->ls_orphans_mutex);
+ spin_lock_bh(&ls->ls_orphans_lock);
list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
if (iter->lkb_resource->res_length != namelen)
continue;
@@ -5743,7 +5897,7 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
*lkid = iter->lkb_id;
break;
}
- mutex_unlock(&ls->ls_orphans_mutex);
+ spin_unlock_bh(&ls->ls_orphans_lock);
if (!lkb && found_other_mode) {
rv = -EAGAIN;
@@ -5774,9 +5928,9 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
* for the proc locks list.
*/
- spin_lock(&ua->proc->locks_spin);
+ spin_lock_bh(&ua->proc->locks_spin);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
- spin_unlock(&ua->proc->locks_spin);
+ spin_unlock_bh(&ua->proc->locks_spin);
out:
kfree(ua_tmp);
return rv;
@@ -5820,11 +5974,11 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
if (error)
goto out_put;
- spin_lock(&ua->proc->locks_spin);
+ spin_lock_bh(&ua->proc->locks_spin);
/* dlm_user_add_cb() may have already taken lkb off the proc list */
if (!list_empty(&lkb->lkb_ownqueue))
list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
- spin_unlock(&ua->proc->locks_spin);
+ spin_unlock_bh(&ua->proc->locks_spin);
out_put:
trace_dlm_unlock_end(ls, lkb, flags, error);
dlm_put_lkb(lkb);
@@ -5935,9 +6089,9 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
int error;
hold_lkb(lkb); /* reference for the ls_orphans list */
- mutex_lock(&ls->ls_orphans_mutex);
+ spin_lock_bh(&ls->ls_orphans_lock);
list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
- mutex_unlock(&ls->ls_orphans_mutex);
+ spin_unlock_bh(&ls->ls_orphans_lock);
set_unlock_args(0, lkb->lkb_ua, &args);
@@ -5975,7 +6129,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
{
struct dlm_lkb *lkb = NULL;
- spin_lock(&ls->ls_clear_proc_locks);
+ spin_lock_bh(&ls->ls_clear_proc_locks);
if (list_empty(&proc->locks))
goto out;
@@ -5987,7 +6141,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
else
set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
out:
- spin_unlock(&ls->ls_clear_proc_locks);
+ spin_unlock_bh(&ls->ls_clear_proc_locks);
return lkb;
}
@@ -6003,6 +6157,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
+ struct dlm_callback *cb, *cb_safe;
struct dlm_lkb *lkb, *safe;
dlm_lock_recovery(ls);
@@ -6023,7 +6178,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
dlm_put_lkb(lkb);
}
- spin_lock(&ls->ls_clear_proc_locks);
+ spin_lock_bh(&ls->ls_clear_proc_locks);
/* in-progress unlocks */
list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
@@ -6032,29 +6187,29 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
dlm_put_lkb(lkb);
}
- list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
- dlm_purge_lkb_callbacks(lkb);
- list_del_init(&lkb->lkb_cb_list);
- dlm_put_lkb(lkb);
+ list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
+ list_del(&cb->list);
+ dlm_free_cb(cb);
}
- spin_unlock(&ls->ls_clear_proc_locks);
+ spin_unlock_bh(&ls->ls_clear_proc_locks);
dlm_unlock_recovery(ls);
}
static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
+ struct dlm_callback *cb, *cb_safe;
struct dlm_lkb *lkb, *safe;
while (1) {
lkb = NULL;
- spin_lock(&proc->locks_spin);
+ spin_lock_bh(&proc->locks_spin);
if (!list_empty(&proc->locks)) {
lkb = list_entry(proc->locks.next, struct dlm_lkb,
lkb_ownqueue);
list_del_init(&lkb->lkb_ownqueue);
}
- spin_unlock(&proc->locks_spin);
+ spin_unlock_bh(&proc->locks_spin);
if (!lkb)
break;
@@ -6064,21 +6219,20 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
dlm_put_lkb(lkb); /* ref from proc->locks list */
}
- spin_lock(&proc->locks_spin);
+ spin_lock_bh(&proc->locks_spin);
list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
list_del_init(&lkb->lkb_ownqueue);
set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags);
dlm_put_lkb(lkb);
}
- spin_unlock(&proc->locks_spin);
+ spin_unlock_bh(&proc->locks_spin);
- spin_lock(&proc->asts_spin);
- list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
- dlm_purge_lkb_callbacks(lkb);
- list_del_init(&lkb->lkb_cb_list);
- dlm_put_lkb(lkb);
+ spin_lock_bh(&proc->asts_spin);
+ list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) {
+ list_del(&cb->list);
+ dlm_free_cb(cb);
}
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
}
/* pid of 0 means purge all orphans */
@@ -6087,7 +6241,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
{
struct dlm_lkb *lkb, *safe;
- mutex_lock(&ls->ls_orphans_mutex);
+ spin_lock_bh(&ls->ls_orphans_lock);
list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
if (pid && lkb->lkb_ownpid != pid)
continue;
@@ -6095,7 +6249,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
list_del_init(&lkb->lkb_ownqueue);
dlm_put_lkb(lkb);
}
- mutex_unlock(&ls->ls_orphans_mutex);
+ spin_unlock_bh(&ls->ls_orphans_lock);
}
static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
@@ -6105,7 +6259,7 @@ static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
int error;
error = _create_message(ls, sizeof(struct dlm_message), nodeid,
- DLM_MSG_PURGE, &ms, &mh, GFP_NOFS);
+ DLM_MSG_PURGE, &ms, &mh);
if (error)
return error;
ms->m_nodeid = cpu_to_le32(nodeid);
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index b54e2cbbe6e2..8de9dee4c058 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -11,6 +11,7 @@
#ifndef __LOCK_DOT_H__
#define __LOCK_DOT_H__
+void dlm_rsb_toss_timer(struct timer_list *timer);
void dlm_dump_rsb(struct dlm_rsb *r);
void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len);
void dlm_print_lkb(struct dlm_lkb *lkb);
@@ -18,20 +19,23 @@ void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms,
uint32_t saved_seq);
void dlm_receive_buffer(const union dlm_packet *p, int nodeid);
int dlm_modes_compat(int mode1, int mode2);
+void free_toss_rsb(struct dlm_rsb *r);
void dlm_put_rsb(struct dlm_rsb *r);
void dlm_hold_rsb(struct dlm_rsb *r);
int dlm_put_lkb(struct dlm_lkb *lkb);
void dlm_scan_rsbs(struct dlm_ls *ls);
int dlm_lock_recovery_try(struct dlm_ls *ls);
+void dlm_lock_recovery(struct dlm_ls *ls);
void dlm_unlock_recovery(struct dlm_ls *ls);
+void dlm_timer_resume(struct dlm_ls *ls);
int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
int len, unsigned int flags, int *r_nodeid, int *result);
-int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len,
+int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len,
struct dlm_rsb **r_ret);
-void dlm_recover_purge(struct dlm_ls *ls);
+void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list);
void dlm_purge_mstcpy_locks(struct dlm_rsb *r);
void dlm_recover_grant(struct dlm_ls *ls);
int dlm_recover_waiters_post(struct dlm_ls *ls);
@@ -68,12 +72,12 @@ static inline int is_master(struct dlm_rsb *r)
static inline void lock_rsb(struct dlm_rsb *r)
{
- mutex_lock(&r->res_mutex);
+ spin_lock_bh(&r->res_lock);
}
static inline void unlock_rsb(struct dlm_rsb *r)
{
- mutex_unlock(&r->res_mutex);
+ spin_unlock_bh(&r->res_lock);
}
#endif
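
With lock_rsb()/unlock_rsb() backed by a bh-disabling spinlock instead of a mutex, the usual critical-section pattern (as in dlm_recover_purge() in lock.c) is unchanged, but the body may no longer sleep:

/* sketch: typical rsb critical section, mirroring dlm_recover_purge();
 * with res_lock a spinlock the body must not sleep */
static void example_with_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
	lock_rsb(r);
	/* ... operate on the rsb's lock queues, no sleeping ... */
	unlock_rsb(r);
	unhold_rsb(r);
}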
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 0455dddb0797..475ab4370dda 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -29,8 +29,6 @@ static int ls_count;
static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
-static struct task_struct * scand_task;
-
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
@@ -247,66 +245,11 @@ void dlm_lockspace_exit(void)
kset_unregister(dlm_kset);
}
-static struct dlm_ls *find_ls_to_scan(void)
-{
- struct dlm_ls *ls;
-
- spin_lock(&lslist_lock);
- list_for_each_entry(ls, &lslist, ls_list) {
- if (time_after_eq(jiffies, ls->ls_scan_time +
- dlm_config.ci_scan_secs * HZ)) {
- spin_unlock(&lslist_lock);
- return ls;
- }
- }
- spin_unlock(&lslist_lock);
- return NULL;
-}
-
-static int dlm_scand(void *data)
-{
- struct dlm_ls *ls;
-
- while (!kthread_should_stop()) {
- ls = find_ls_to_scan();
- if (ls) {
- if (dlm_lock_recovery_try(ls)) {
- ls->ls_scan_time = jiffies;
- dlm_scan_rsbs(ls);
- dlm_unlock_recovery(ls);
- } else {
- ls->ls_scan_time += HZ;
- }
- continue;
- }
- schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
- }
- return 0;
-}
-
-static int dlm_scand_start(void)
-{
- struct task_struct *p;
- int error = 0;
-
- p = kthread_run(dlm_scand, NULL, "dlm_scand");
- if (IS_ERR(p))
- error = PTR_ERR(p);
- else
- scand_task = p;
- return error;
-}
-
-static void dlm_scand_stop(void)
-{
- kthread_stop(scand_task);
-}
-
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
struct dlm_ls *ls;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_global_id == id) {
@@ -316,7 +259,7 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
}
ls = NULL;
out:
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
return ls;
}
@@ -324,7 +267,7 @@ struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
struct dlm_ls *ls;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_local_handle == lockspace) {
atomic_inc(&ls->ls_count);
@@ -333,7 +276,7 @@ struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
}
ls = NULL;
out:
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
return ls;
}
@@ -341,7 +284,7 @@ struct dlm_ls *dlm_find_lockspace_device(int minor)
{
struct dlm_ls *ls;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_device.minor == minor) {
atomic_inc(&ls->ls_count);
@@ -350,7 +293,7 @@ struct dlm_ls *dlm_find_lockspace_device(int minor)
}
ls = NULL;
out:
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
return ls;
}
@@ -365,15 +308,15 @@ static void remove_lockspace(struct dlm_ls *ls)
retry:
wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0);
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
if (atomic_read(&ls->ls_count) != 0) {
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
goto retry;
}
WARN_ON(ls->ls_create_count != 0);
list_del(&ls->ls_list);
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
}
static int threads_start(void)
@@ -382,22 +325,9 @@ static int threads_start(void)
/* Thread for sending/receiving messages for all lockspace's */
error = dlm_midcomms_start();
- if (error) {
+ if (error)
log_print("cannot start dlm midcomms %d", error);
- goto fail;
- }
-
- error = dlm_scand_start();
- if (error) {
- log_print("cannot start dlm_scand thread %d", error);
- goto midcomms_fail;
- }
-
- return 0;
- midcomms_fail:
- dlm_midcomms_stop();
- fail:
return error;
}
@@ -407,9 +337,9 @@ static int new_lockspace(const char *name, const char *cluster,
int *ops_result, dlm_lockspace_t **lockspace)
{
struct dlm_ls *ls;
- int i, size, error;
int do_unreg = 0;
int namelen = strlen(name);
+ int error;
if (namelen > DLM_LOCKSPACE_LEN || namelen == 0)
return -EINVAL;
@@ -448,7 +378,7 @@ static int new_lockspace(const char *name, const char *cluster,
error = 0;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
WARN_ON(ls->ls_create_count <= 0);
if (ls->ls_namelen != namelen)
@@ -464,7 +394,7 @@ static int new_lockspace(const char *name, const char *cluster,
error = 1;
break;
}
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
if (error)
goto out;
@@ -492,32 +422,21 @@ static int new_lockspace(const char *name, const char *cluster,
*/
ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL));
- size = READ_ONCE(dlm_config.ci_rsbtbl_size);
- ls->ls_rsbtbl_size = size;
+ INIT_LIST_HEAD(&ls->ls_toss);
+ INIT_LIST_HEAD(&ls->ls_keep);
+ rwlock_init(&ls->ls_rsbtbl_lock);
- ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable)));
- if (!ls->ls_rsbtbl)
+ error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params);
+ if (error)
goto out_lsfree;
- for (i = 0; i < size; i++) {
- ls->ls_rsbtbl[i].keep.rb_node = NULL;
- ls->ls_rsbtbl[i].toss.rb_node = NULL;
- spin_lock_init(&ls->ls_rsbtbl[i].lock);
- }
-
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
- ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
- GFP_KERNEL);
- if (!ls->ls_remove_names[i])
- goto out_rsbtbl;
- }
idr_init(&ls->ls_lkbidr);
- spin_lock_init(&ls->ls_lkbidr_spin);
+ rwlock_init(&ls->ls_lkbidr_lock);
INIT_LIST_HEAD(&ls->ls_waiters);
- mutex_init(&ls->ls_waiters_mutex);
+ spin_lock_init(&ls->ls_waiters_lock);
INIT_LIST_HEAD(&ls->ls_orphans);
- mutex_init(&ls->ls_orphans_mutex);
+ spin_lock_init(&ls->ls_orphans_lock);
INIT_LIST_HEAD(&ls->ls_new_rsb);
spin_lock_init(&ls->ls_new_rsb_spin);
@@ -552,11 +471,9 @@ static int new_lockspace(const char *name, const char *cluster,
ls->ls_recover_seq = get_random_u64();
ls->ls_recover_args = NULL;
init_rwsem(&ls->ls_in_recovery);
- init_rwsem(&ls->ls_recv_active);
+ rwlock_init(&ls->ls_recv_active);
INIT_LIST_HEAD(&ls->ls_requestqueue);
- atomic_set(&ls->ls_requestqueue_cnt, 0);
- init_waitqueue_head(&ls->ls_requestqueue_wait);
- mutex_init(&ls->ls_requestqueue_mutex);
+ rwlock_init(&ls->ls_requestqueue_lock);
spin_lock_init(&ls->ls_clear_proc_locks);
/* Due to backwards compatibility with 3.1 we need to use maximum
@@ -565,8 +482,10 @@ static int new_lockspace(const char *name, const char *cluster,
* might send less.
*/
ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
- if (!ls->ls_recover_buf)
+ if (!ls->ls_recover_buf) {
+ error = -ENOMEM;
goto out_lkbidr;
+ }
ls->ls_slot = 0;
ls->ls_num_slots = 0;
@@ -580,13 +499,20 @@ static int new_lockspace(const char *name, const char *cluster,
ls->ls_recover_list_count = 0;
ls->ls_local_handle = ls;
init_waitqueue_head(&ls->ls_wait_general);
- INIT_LIST_HEAD(&ls->ls_root_list);
- init_rwsem(&ls->ls_root_sem);
+ INIT_LIST_HEAD(&ls->ls_masters_list);
+ rwlock_init(&ls->ls_masters_lock);
+ INIT_LIST_HEAD(&ls->ls_dir_dump_list);
+ rwlock_init(&ls->ls_dir_dump_lock);
+
+ INIT_LIST_HEAD(&ls->ls_toss_q);
+ spin_lock_init(&ls->ls_toss_q_lock);
+ timer_setup(&ls->ls_timer, dlm_rsb_toss_timer,
+ TIMER_DEFERRABLE);
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
ls->ls_create_count = 1;
list_add(&ls->ls_list, &lslist);
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
if (flags & DLM_LSFL_FS) {
error = dlm_callback_start(ls);
@@ -655,17 +581,14 @@ static int new_lockspace(const char *name, const char *cluster,
out_callback:
dlm_callback_stop(ls);
out_delist:
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_del(&ls->ls_list);
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
idr_destroy(&ls->ls_recover_idr);
kfree(ls->ls_recover_buf);
out_lkbidr:
idr_destroy(&ls->ls_lkbidr);
- out_rsbtbl:
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
- kfree(ls->ls_remove_names[i]);
- vfree(ls->ls_rsbtbl);
+ rhashtable_destroy(&ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
kobject_put(&ls->ls_kobj);
@@ -697,7 +620,6 @@ static int __dlm_new_lockspace(const char *name, const char *cluster,
if (error > 0)
error = 0;
if (!ls_count) {
- dlm_scand_stop();
dlm_midcomms_shutdown();
dlm_midcomms_stop();
}
@@ -756,7 +678,7 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
{
int rv;
- spin_lock(&ls->ls_lkbidr_spin);
+ read_lock_bh(&ls->ls_lkbidr_lock);
if (force == 0) {
rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
} else if (force == 1) {
@@ -764,19 +686,25 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
} else {
rv = 0;
}
- spin_unlock(&ls->ls_lkbidr_spin);
+ read_unlock_bh(&ls->ls_lkbidr_lock);
return rv;
}
+static void rhash_free_rsb(void *ptr, void *arg)
+{
+ struct dlm_rsb *rsb = ptr;
+
+ dlm_free_rsb(rsb);
+}
+
static int release_lockspace(struct dlm_ls *ls, int force)
{
struct dlm_rsb *rsb;
- struct rb_node *n;
- int i, busy, rv;
+ int busy, rv;
busy = lockspace_busy(ls, force);
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
if (ls->ls_create_count == 1) {
if (busy) {
rv = -EBUSY;
@@ -790,7 +718,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
} else {
rv = -EINVAL;
}
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
if (rv) {
log_debug(ls, "release_lockspace no remove %d", rv);
@@ -807,8 +735,13 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_recoverd_stop(ls);
+ /* clear the LSFL_RUNNING flag to speed up
+ * timer_shutdown_sync(); we don't care anymore
+ */
+ clear_bit(LSFL_RUNNING, &ls->ls_flags);
+ timer_shutdown_sync(&ls->ls_timer);
+
if (ls_count == 1) {
- dlm_scand_stop();
dlm_clear_members(ls);
dlm_midcomms_shutdown();
}
@@ -830,27 +763,9 @@ static int release_lockspace(struct dlm_ls *ls, int force)
idr_destroy(&ls->ls_lkbidr);
/*
- * Free all rsb's on rsbtbl[] lists
+ * Free all rsb's on rsbtbl
*/
-
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
- rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
- rb_erase(n, &ls->ls_rsbtbl[i].keep);
- dlm_free_rsb(rsb);
- }
-
- while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
- rsb = rb_entry(n, struct dlm_rsb, res_hashnode);
- rb_erase(n, &ls->ls_rsbtbl[i].toss);
- dlm_free_rsb(rsb);
- }
- }
-
- vfree(ls->ls_rsbtbl);
-
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
- kfree(ls->ls_remove_names[i]);
+ rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
while (!list_empty(&ls->ls_new_rsb)) {
rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
@@ -918,20 +833,19 @@ void dlm_stop_lockspaces(void)
restart:
count = 0;
- spin_lock(&lslist_lock);
+ spin_lock_bh(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
count++;
continue;
}
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
log_error(ls, "no userland control daemon, stopping lockspace");
dlm_ls_stop(ls);
goto restart;
}
- spin_unlock(&lslist_lock);
+ spin_unlock_bh(&lslist_lock);
if (count)
log_print("dlm user daemon left %d lockspaces", count);
}
-
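[Editorial sketch, not part of the patch: the lockspace code above replaces the open-coded rsbtbl array of rb-trees with the generic rhashtable API, freeing all remaining entries at teardown through a callback, as rhash_free_rsb() does. A minimal, self-contained illustration of that pattern follows; the struct and names ("demo_obj", "demo_params") are hypothetical, the real parameters live in dlm_rhash_rsb_params.]

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_obj {
	u32 key;
	struct rhash_head node;	/* linkage owned by the rhashtable */
};

static const struct rhashtable_params demo_params = {
	.head_offset = offsetof(struct demo_obj, node),
	.key_offset  = offsetof(struct demo_obj, key),
	.key_len     = sizeof(u32),
	.automatic_shrinking = true,
};

static void demo_free_obj(void *ptr, void *arg)
{
	kfree(ptr);	/* analogous to dlm_free_rsb() above */
}

static int demo(void)
{
	struct rhashtable ht;
	struct demo_obj *obj;
	int err;

	err = rhashtable_init(&ht, &demo_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		rhashtable_destroy(&ht);
		return -ENOMEM;
	}
	obj->key = 42;
	err = rhashtable_insert_fast(&ht, &obj->node, demo_params);
	if (err)
		kfree(obj);

	/* frees every remaining object via the callback, then the table */
	rhashtable_free_and_destroy(&ht, demo_free_obj, NULL);
	return err;
}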
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 6296c62c10fa..6b8078085e56 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -204,6 +204,7 @@ static void process_dlm_messages(struct work_struct *work);
static DECLARE_WORK(process_work, process_dlm_messages);
static DEFINE_SPINLOCK(processqueue_lock);
static bool process_dlm_messages_pending;
+static DECLARE_WAIT_QUEUE_HEAD(processqueue_wq);
static atomic_t processqueue_count;
static LIST_HEAD(processqueue);
@@ -248,7 +249,7 @@ struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void)
struct kmem_cache *dlm_lowcomms_msg_cache_create(void)
{
- return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL);
+ return KMEM_CACHE(dlm_msg, 0);
}
/* need to hold writequeue_lock */
@@ -867,36 +868,38 @@ static void process_dlm_messages(struct work_struct *work)
{
struct processqueue_entry *pentry;
- spin_lock(&processqueue_lock);
+ spin_lock_bh(&processqueue_lock);
pentry = list_first_entry_or_null(&processqueue,
struct processqueue_entry, list);
if (WARN_ON_ONCE(!pentry)) {
process_dlm_messages_pending = false;
- spin_unlock(&processqueue_lock);
+ spin_unlock_bh(&processqueue_lock);
return;
}
list_del(&pentry->list);
- atomic_dec(&processqueue_count);
- spin_unlock(&processqueue_lock);
+ if (atomic_dec_and_test(&processqueue_count))
+ wake_up(&processqueue_wq);
+ spin_unlock_bh(&processqueue_lock);
for (;;) {
dlm_process_incoming_buffer(pentry->nodeid, pentry->buf,
pentry->buflen);
free_processqueue_entry(pentry);
- spin_lock(&processqueue_lock);
+ spin_lock_bh(&processqueue_lock);
pentry = list_first_entry_or_null(&processqueue,
struct processqueue_entry, list);
if (!pentry) {
process_dlm_messages_pending = false;
- spin_unlock(&processqueue_lock);
+ spin_unlock_bh(&processqueue_lock);
break;
}
list_del(&pentry->list);
- atomic_dec(&processqueue_count);
- spin_unlock(&processqueue_lock);
+ if (atomic_dec_and_test(&processqueue_count))
+ wake_up(&processqueue_wq);
+ spin_unlock_bh(&processqueue_lock);
}
}
@@ -966,14 +969,14 @@ again:
memmove(con->rx_leftover_buf, pentry->buf + ret,
con->rx_leftover);
- spin_lock(&processqueue_lock);
+ spin_lock_bh(&processqueue_lock);
ret = atomic_inc_return(&processqueue_count);
list_add_tail(&pentry->list, &processqueue);
if (!process_dlm_messages_pending) {
process_dlm_messages_pending = true;
queue_work(process_workqueue, &process_work);
}
- spin_unlock(&processqueue_lock);
+ spin_unlock_bh(&processqueue_lock);
if (ret > DLM_MAX_PROCESS_BUFFERS)
return DLM_IO_FLUSH;
@@ -1229,14 +1232,13 @@ out:
};
static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
- gfp_t allocation, char **ppc,
- void (*cb)(void *data),
+ char **ppc, void (*cb)(void *data),
void *data)
{
struct writequeue_entry *e;
struct dlm_msg *msg;
- msg = dlm_allocate_msg(allocation);
+ msg = dlm_allocate_msg();
if (!msg)
return NULL;
@@ -1261,9 +1263,8 @@ static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
* dlm_lowcomms_commit_msg which is a must call if success
*/
#ifndef __CHECKER__
-struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
- char **ppc, void (*cb)(void *data),
- void *data)
+struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc,
+ void (*cb)(void *data), void *data)
{
struct connection *con;
struct dlm_msg *msg;
@@ -1284,7 +1285,7 @@ struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
return NULL;
}
- msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data);
+ msg = dlm_lowcomms_new_msg_con(con, len, ppc, cb, data);
if (!msg) {
srcu_read_unlock(&connections_srcu, idx);
return NULL;
@@ -1348,8 +1349,8 @@ int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
if (msg->retransmit)
return 1;
- msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
- GFP_ATOMIC, &ppc, NULL, NULL);
+ msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len, &ppc,
+ NULL, NULL);
if (!msg_resend)
return -ENOMEM;
@@ -1513,7 +1514,20 @@ static void process_recv_sockets(struct work_struct *work)
/* CF_RECV_PENDING cleared */
break;
case DLM_IO_FLUSH:
- flush_workqueue(process_workqueue);
+ /* we can't flush process_workqueue here because flushing a
+ * non WQ_MEM_RECLAIM workqueue such as process_workqueue from
+ * a WQ_MEM_RECLAIM workqueue can cause a deadlock. Instead we
+ * have a waitqueue to wait until all messages are processed.
+ *
+ * This handling is only necessary to back off the sender and
+ * not queue all messages from the socket layer into the DLM
+ * processqueue. When DLM is capable of parsing multiple
+ * messages on an e.g. per socket basis this handling might be
+ * removed. Especially in a message burst we are too slow to
+ * process messages and the queue would fill up memory.
+ */
+ wait_event(processqueue_wq, !atomic_read(&processqueue_count));
fallthrough;
case DLM_IO_RESCHED:
cond_resched();
@@ -1703,11 +1717,7 @@ static int work_start(void)
return -ENOMEM;
}
- /* ordered dlm message process queue,
- * should be converted to a tasklet
- */
- process_workqueue = alloc_ordered_workqueue("dlm_process",
- WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ process_workqueue = alloc_workqueue("dlm_process", WQ_HIGHPRI | WQ_BH, 0);
if (!process_workqueue) {
log_print("can't start dlm_process");
destroy_workqueue(io_workqueue);
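[Editorial sketch, not part of the patch: the DLM_IO_FLUSH handling above replaces flush_workqueue() with a counter-plus-waitqueue handshake so the sender can back off until the queue is drained. A minimal illustration of that pattern follows, with illustrative names mirroring processqueue_count/processqueue_wq/processqueue_lock.]

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static atomic_t queue_count;
static DECLARE_WAIT_QUEUE_HEAD(queue_wq);
static DEFINE_SPINLOCK(queue_lock);

/* producer side (may run in BH context): count and queue the entry */
static void produce_one(struct list_head *entry, struct list_head *queue)
{
	spin_lock_bh(&queue_lock);
	atomic_inc(&queue_count);
	list_add_tail(entry, queue);
	spin_unlock_bh(&queue_lock);
}

/* consumer side: unlink one entry, wake waiters once fully drained */
static void consume_one(struct list_head *queue)
{
	spin_lock_bh(&queue_lock);
	if (!list_empty(queue))
		list_del(queue->next);
	if (atomic_dec_and_test(&queue_count))
		wake_up(&queue_wq);
	spin_unlock_bh(&queue_lock);
}

/* sender backoff: sleep until the consumer has processed everything */
static void producer_backoff(void)
{
	wait_event(queue_wq, !atomic_read(&queue_count));
}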
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index 3e8dca66183b..8deb16f8f620 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -39,9 +39,8 @@ void dlm_lowcomms_stop(void);
void dlm_lowcomms_init(void);
void dlm_lowcomms_exit(void);
int dlm_lowcomms_close(int nodeid);
-struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
- char **ppc, void (*cb)(void *data),
- void *data);
+struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc,
+ void (*cb)(void *data), void *data);
void dlm_lowcomms_commit_msg(struct dlm_msg *msg);
void dlm_lowcomms_put_msg(struct dlm_msg *msg);
int dlm_lowcomms_resend_msg(struct dlm_msg *msg);
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index be7909ead71b..c46e306f2e5c 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -630,7 +630,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
* message to the requestqueue without races.
*/
- down_write(&ls->ls_recv_active);
+ write_lock_bh(&ls->ls_recv_active);
/*
* Abort any recovery that's in progress (see RECOVER_STOP,
@@ -638,18 +638,25 @@ int dlm_ls_stop(struct dlm_ls *ls)
* dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
*/
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
set_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
+ if (new)
+ timer_delete_sync(&ls->ls_timer);
ls->ls_recover_seq++;
- spin_unlock(&ls->ls_recover_lock);
+
+ /* activate requestqueue and stop processing */
+ write_lock_bh(&ls->ls_requestqueue_lock);
+ set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
+ write_unlock_bh(&ls->ls_requestqueue_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
/*
* Let dlm_recv run again, now any normal messages will be saved on the
* requestqueue for later.
*/
- up_write(&ls->ls_recv_active);
+ write_unlock_bh(&ls->ls_recv_active);
/*
* This in_recovery lock does two things:
@@ -674,13 +681,13 @@ int dlm_ls_stop(struct dlm_ls *ls)
dlm_recoverd_suspend(ls);
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
kfree(ls->ls_slots);
ls->ls_slots = NULL;
ls->ls_num_slots = 0;
ls->ls_slots_size = 0;
ls->ls_recover_status = 0;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
dlm_recoverd_resume(ls);
@@ -714,12 +721,12 @@ int dlm_ls_start(struct dlm_ls *ls)
if (error < 0)
goto fail_rv;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
/* the lockspace needs to be stopped before it can be started */
if (!dlm_locking_stopped(ls)) {
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
log_error(ls, "start ignored: lockspace running");
error = -EINVAL;
goto fail;
@@ -730,7 +737,7 @@ int dlm_ls_start(struct dlm_ls *ls)
rv->seq = ++ls->ls_recover_seq;
rv_old = ls->ls_recover_args;
ls->ls_recover_args = rv;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
if (rv_old) {
log_error(ls, "unused recovery %llx %d",
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 64f212a066cf..15a8b1cee433 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -84,7 +84,7 @@ char *dlm_allocate_lvb(struct dlm_ls *ls)
{
char *p;
- p = kzalloc(ls->ls_lvblen, GFP_NOFS);
+ p = kzalloc(ls->ls_lvblen, GFP_ATOMIC);
return p;
}
@@ -97,7 +97,7 @@ struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- r = kmem_cache_zalloc(rsb_cache, GFP_NOFS);
+ r = kmem_cache_zalloc(rsb_cache, GFP_ATOMIC);
return r;
}
@@ -112,7 +112,7 @@ struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
{
struct dlm_lkb *lkb;
- lkb = kmem_cache_zalloc(lkb_cache, GFP_NOFS);
+ lkb = kmem_cache_zalloc(lkb_cache, GFP_ATOMIC);
return lkb;
}
@@ -127,16 +127,12 @@ void dlm_free_lkb(struct dlm_lkb *lkb)
}
}
- /* drop references if they are set */
- dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
- dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
-
kmem_cache_free(lkb_cache, lkb);
}
-struct dlm_mhandle *dlm_allocate_mhandle(gfp_t allocation)
+struct dlm_mhandle *dlm_allocate_mhandle(void)
{
- return kmem_cache_alloc(mhandle_cache, allocation);
+ return kmem_cache_alloc(mhandle_cache, GFP_ATOMIC);
}
void dlm_free_mhandle(struct dlm_mhandle *mhandle)
@@ -154,9 +150,9 @@ void dlm_free_writequeue(struct writequeue_entry *writequeue)
kmem_cache_free(writequeue_cache, writequeue);
}
-struct dlm_msg *dlm_allocate_msg(gfp_t allocation)
+struct dlm_msg *dlm_allocate_msg(void)
{
- return kmem_cache_alloc(msg_cache, allocation);
+ return kmem_cache_alloc(msg_cache, GFP_ATOMIC);
}
void dlm_free_msg(struct dlm_msg *msg)
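[Editorial sketch, not part of the patch: the GFP_NOFS to GFP_ATOMIC switches above follow from the locking conversion in this series. These allocators can now be called with bottom halves disabled (under spin_lock_bh() or from the WQ_BH workqueue), where a sleeping allocation would be a bug. A minimal illustration of the constraint, with hypothetical names:]

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void *alloc_under_bh_lock(size_t len)
{
	void *p;

	spin_lock_bh(&demo_lock);
	/* GFP_KERNEL/GFP_NOFS may sleep and must not be used here;
	 * GFP_ATOMIC never sleeps but can fail under memory pressure,
	 * so every caller must handle a NULL return. */
	p = kzalloc(len, GFP_ATOMIC);
	spin_unlock_bh(&demo_lock);
	return p;
}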
diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h
index 6b29563d24f7..15198d46b42a 100644
--- a/fs/dlm/memory.h
+++ b/fs/dlm/memory.h
@@ -20,11 +20,11 @@ struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls);
void dlm_free_lkb(struct dlm_lkb *l);
char *dlm_allocate_lvb(struct dlm_ls *ls);
void dlm_free_lvb(char *l);
-struct dlm_mhandle *dlm_allocate_mhandle(gfp_t allocation);
+struct dlm_mhandle *dlm_allocate_mhandle(void);
void dlm_free_mhandle(struct dlm_mhandle *mhandle);
struct writequeue_entry *dlm_allocate_writequeue(void);
void dlm_free_writequeue(struct writequeue_entry *writequeue);
-struct dlm_msg *dlm_allocate_msg(gfp_t allocation);
+struct dlm_msg *dlm_allocate_msg(void);
void dlm_free_msg(struct dlm_msg *msg);
struct dlm_callback *dlm_allocate_cb(void);
void dlm_free_cb(struct dlm_callback *cb);
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index 2247ebb61be1..c34f38e9ee5c 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -226,8 +226,7 @@ static DEFINE_MUTEX(close_lock);
struct kmem_cache *dlm_midcomms_cache_create(void)
{
- return kmem_cache_create("dlm_mhandle", sizeof(struct dlm_mhandle),
- 0, 0, NULL);
+ return KMEM_CACHE(dlm_mhandle, 0);
}
static inline const char *dlm_state_str(int state)
@@ -365,9 +364,9 @@ int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
node->users = 0;
midcomms_node_reset(node);
- spin_lock(&nodes_lock);
+ spin_lock_bh(&nodes_lock);
hlist_add_head_rcu(&node->hlist, &node_hash[r]);
- spin_unlock(&nodes_lock);
+ spin_unlock_bh(&nodes_lock);
node->debugfs = dlm_create_debug_comms_file(nodeid, node);
return 0;
@@ -380,8 +379,7 @@ static int dlm_send_ack(int nodeid, uint32_t seq)
struct dlm_msg *msg;
char *ppc;
- msg = dlm_lowcomms_new_msg(nodeid, mb_len, GFP_ATOMIC, &ppc,
- NULL, NULL);
+ msg = dlm_lowcomms_new_msg(nodeid, mb_len, &ppc, NULL, NULL);
if (!msg)
return -ENOMEM;
@@ -429,7 +427,7 @@ static int dlm_send_fin(struct midcomms_node *node,
struct dlm_mhandle *mh;
char *ppc;
- mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, GFP_ATOMIC, &ppc);
+ mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, &ppc);
if (!mh)
return -ENOMEM;
@@ -479,7 +477,7 @@ static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq)
static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
{
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
pr_debug("receive passive fin ack from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
@@ -493,13 +491,13 @@ static void dlm_pas_fin_ack_rcv(struct midcomms_node *node)
wake_up(&node->shutdown_wait);
break;
default:
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
log_print("%s: unexpected state: %d",
__func__, node->state);
WARN_ON_ONCE(1);
return;
}
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
}
static void dlm_receive_buffer_3_2_trace(uint32_t seq,
@@ -536,7 +534,7 @@ static void dlm_midcomms_receive_buffer(const union dlm_packet *p,
if (is_expected_seq) {
switch (p->header.h_cmd) {
case DLM_FIN:
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
pr_debug("receive fin msg from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
@@ -577,13 +575,13 @@ static void dlm_midcomms_receive_buffer(const union dlm_packet *p,
/* probably remove_member caught it, do nothing */
break;
default:
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
log_print("%s: unexpected state: %d",
__func__, node->state);
WARN_ON_ONCE(1);
return;
}
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
break;
default:
WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags));
@@ -977,13 +975,13 @@ static void midcomms_new_msg_cb(void *data)
}
static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid,
- int len, gfp_t allocation, char **ppc)
+ int len, char **ppc)
{
struct dlm_opts *opts;
struct dlm_msg *msg;
msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN,
- allocation, ppc, midcomms_new_msg_cb, mh);
+ ppc, midcomms_new_msg_cb, mh);
if (!msg)
return NULL;
@@ -1002,8 +1000,7 @@ static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int node
* dlm_midcomms_commit_mhandle which is a must call if success
*/
#ifndef __CHECKER__
-struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
- gfp_t allocation, char **ppc)
+struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc)
{
struct midcomms_node *node;
struct dlm_mhandle *mh;
@@ -1018,7 +1015,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
/* this is a bug, however we go on and hope it will be resolved */
WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
- mh = dlm_allocate_mhandle(allocation);
+ mh = dlm_allocate_mhandle();
if (!mh)
goto err;
@@ -1029,8 +1026,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
switch (node->version) {
case DLM_VERSION_3_1:
- msg = dlm_lowcomms_new_msg(nodeid, len, allocation, ppc,
- NULL, NULL);
+ msg = dlm_lowcomms_new_msg(nodeid, len, ppc, NULL, NULL);
if (!msg) {
dlm_free_mhandle(mh);
goto err;
@@ -1041,8 +1037,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
/* send ack back if necessary */
dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
- msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, allocation,
- ppc);
+ msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, ppc);
if (!msg) {
dlm_free_mhandle(mh);
goto err;
@@ -1187,7 +1182,7 @@ void dlm_midcomms_exit(void)
static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
{
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
pr_debug("receive active fin ack from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
@@ -1207,13 +1202,13 @@ static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
wake_up(&node->shutdown_wait);
break;
default:
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
log_print("%s: unexpected state: %d",
__func__, node->state);
WARN_ON_ONCE(1);
return;
}
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
}
void dlm_midcomms_add_member(int nodeid)
@@ -1228,7 +1223,7 @@ void dlm_midcomms_add_member(int nodeid)
return;
}
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
if (!node->users) {
pr_debug("receive add member from node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
@@ -1256,7 +1251,7 @@ void dlm_midcomms_add_member(int nodeid)
node->users++;
pr_debug("node %d users inc count %d\n", nodeid, node->users);
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
srcu_read_unlock(&nodes_srcu, idx);
}
@@ -1274,13 +1269,13 @@ void dlm_midcomms_remove_member(int nodeid)
return;
}
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
/* case of dlm_midcomms_addr() created node but
* was not added before because dlm_midcomms_close()
* removed the node
*/
if (!node->users) {
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
srcu_read_unlock(&nodes_srcu, idx);
return;
}
@@ -1318,7 +1313,7 @@ void dlm_midcomms_remove_member(int nodeid)
break;
}
}
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
srcu_read_unlock(&nodes_srcu, idx);
}
@@ -1356,7 +1351,7 @@ static void midcomms_shutdown(struct midcomms_node *node)
return;
}
- spin_lock(&node->state_lock);
+ spin_lock_bh(&node->state_lock);
pr_debug("receive active shutdown for node %d with state %s\n",
node->nodeid, dlm_state_str(node->state));
switch (node->state) {
@@ -1375,7 +1370,7 @@ static void midcomms_shutdown(struct midcomms_node *node)
*/
break;
}
- spin_unlock(&node->state_lock);
+ spin_unlock_bh(&node->state_lock);
if (DLM_DEBUG_FENCE_TERMINATION)
msleep(5000);
@@ -1446,9 +1441,9 @@ int dlm_midcomms_close(int nodeid)
ret = dlm_lowcomms_close(nodeid);
dlm_delete_debug_comms_file(node->debugfs);
- spin_lock(&nodes_lock);
+ spin_lock_bh(&nodes_lock);
hlist_del_rcu(&node->hlist);
- spin_unlock(&nodes_lock);
+ spin_unlock_bh(&nodes_lock);
srcu_read_unlock(&nodes_srcu, idx);
/* wait that all readers left until flush send queue */
@@ -1502,8 +1497,8 @@ int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
rd.node = node;
rd.buf = buf;
- msg = dlm_lowcomms_new_msg(node->nodeid, buflen, GFP_NOFS,
- &msgbuf, midcomms_new_rawmsg_cb, &rd);
+ msg = dlm_lowcomms_new_msg(node->nodeid, buflen, &msgbuf,
+ midcomms_new_rawmsg_cb, &rd);
if (!msg)
return -ENOMEM;
diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h
index e7246fb3ef57..278d26fdeb2c 100644
--- a/fs/dlm/midcomms.h
+++ b/fs/dlm/midcomms.h
@@ -16,8 +16,7 @@ struct midcomms_node;
int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len);
int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen);
-struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
- gfp_t allocation, char **ppc);
+struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc);
void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name,
int namelen);
int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 3b734aed26b5..be1a71a6303a 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -55,7 +55,7 @@ static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
struct dlm_mhandle *mh;
char *mb;
- mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, GFP_NOFS, &mb);
+ mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, &mb);
if (!mh) {
log_print("%s to %d type %d len %d ENOBUFS",
__func__, to_nodeid, type, len);
@@ -75,8 +75,7 @@ static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type,
struct dlm_msg *msg;
char *mb;
- msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, GFP_NOFS, &mb,
- NULL, NULL);
+ msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, &mb, NULL, NULL);
if (!msg) {
log_print("create_rcom to %d type %d len %d ENOBUFS",
to_nodeid, type, len);
@@ -144,18 +143,18 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq)
{
- spin_lock(&ls->ls_rcom_spin);
+ spin_lock_bh(&ls->ls_rcom_spin);
*new_seq = cpu_to_le64(++ls->ls_rcom_seq);
set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
- spin_unlock(&ls->ls_rcom_spin);
+ spin_unlock_bh(&ls->ls_rcom_spin);
}
static void disallow_sync_reply(struct dlm_ls *ls)
{
- spin_lock(&ls->ls_rcom_spin);
+ spin_lock_bh(&ls->ls_rcom_spin);
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
- spin_unlock(&ls->ls_rcom_spin);
+ spin_unlock_bh(&ls->ls_rcom_spin);
}
/*
@@ -246,10 +245,10 @@ static void receive_rcom_status(struct dlm_ls *ls,
goto do_create;
}
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status;
num_slots = ls->ls_num_slots;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
len += num_slots * sizeof(struct rcom_slot);
do_create:
@@ -267,9 +266,9 @@ static void receive_rcom_status(struct dlm_ls *ls,
if (!num_slots)
goto do_send;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
if (ls->ls_num_slots != num_slots) {
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
log_debug(ls, "receive_rcom_status num_slots %d to %d",
num_slots, ls->ls_num_slots);
rc->rc_result = 0;
@@ -278,7 +277,7 @@ static void receive_rcom_status(struct dlm_ls *ls,
}
dlm_slots_copy_out(ls, rc);
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
do_send:
send_rcom_stateless(msg, rc);
@@ -286,7 +285,7 @@ static void receive_rcom_status(struct dlm_ls *ls,
static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in)
{
- spin_lock(&ls->ls_rcom_spin);
+ spin_lock_bh(&ls->ls_rcom_spin);
if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) {
log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
@@ -302,7 +301,7 @@ static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in)
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
wake_up(&ls->ls_wait_general);
out:
- spin_unlock(&ls->ls_rcom_spin);
+ spin_unlock_bh(&ls->ls_rcom_spin);
}
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,
@@ -510,7 +509,7 @@ int dlm_send_ls_not_ready(int nodeid, const struct dlm_rcom *rc_in)
char *mb;
int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
- mh = dlm_midcomms_get_mhandle(nodeid, mb_len, GFP_NOFS, &mb);
+ mh = dlm_midcomms_get_mhandle(nodeid, mb_len, &mb);
if (!mh)
return -ENOBUFS;
@@ -614,11 +613,11 @@ void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc, int nodeid)
break;
}
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status;
stop = dlm_recovery_stopped(ls);
seq = ls->ls_recover_seq;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS)))
goto ignore;
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index 53917c0aa3c0..f493d5f30c58 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -74,9 +74,9 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
uint32_t dlm_recover_status(struct dlm_ls *ls)
{
uint32_t status;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
status = ls->ls_recover_status;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
return status;
}
@@ -87,9 +87,9 @@ static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
_set_recover_status(ls, status);
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
}
static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
@@ -188,13 +188,13 @@ int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq)
rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
if (!rv) {
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
_set_recover_status(ls, DLM_RS_NODES_ALL);
ls->ls_num_slots = num_slots;
ls->ls_slots_size = slots_size;
ls->ls_slots = slots;
ls->ls_generation = gen;
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
} else {
dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
}
@@ -241,9 +241,9 @@ static int recover_list_empty(struct dlm_ls *ls)
{
int empty;
- spin_lock(&ls->ls_recover_list_lock);
+ spin_lock_bh(&ls->ls_recover_list_lock);
empty = list_empty(&ls->ls_recover_list);
- spin_unlock(&ls->ls_recover_list_lock);
+ spin_unlock_bh(&ls->ls_recover_list_lock);
return empty;
}
@@ -252,23 +252,23 @@ static void recover_list_add(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- spin_lock(&ls->ls_recover_list_lock);
+ spin_lock_bh(&ls->ls_recover_list_lock);
if (list_empty(&r->res_recover_list)) {
list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
ls->ls_recover_list_count++;
dlm_hold_rsb(r);
}
- spin_unlock(&ls->ls_recover_list_lock);
+ spin_unlock_bh(&ls->ls_recover_list_lock);
}
static void recover_list_del(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- spin_lock(&ls->ls_recover_list_lock);
+ spin_lock_bh(&ls->ls_recover_list_lock);
list_del_init(&r->res_recover_list);
ls->ls_recover_list_count--;
- spin_unlock(&ls->ls_recover_list_lock);
+ spin_unlock_bh(&ls->ls_recover_list_lock);
dlm_put_rsb(r);
}
@@ -277,7 +277,7 @@ static void recover_list_clear(struct dlm_ls *ls)
{
struct dlm_rsb *r, *s;
- spin_lock(&ls->ls_recover_list_lock);
+ spin_lock_bh(&ls->ls_recover_list_lock);
list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
list_del_init(&r->res_recover_list);
r->res_recover_locks_count = 0;
@@ -290,17 +290,17 @@ static void recover_list_clear(struct dlm_ls *ls)
ls->ls_recover_list_count);
ls->ls_recover_list_count = 0;
}
- spin_unlock(&ls->ls_recover_list_lock);
+ spin_unlock_bh(&ls->ls_recover_list_lock);
}
static int recover_idr_empty(struct dlm_ls *ls)
{
int empty = 1;
- spin_lock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_idr_lock);
if (ls->ls_recover_list_count)
empty = 0;
- spin_unlock(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_idr_lock);
return empty;
}
@@ -310,8 +310,7 @@ static int recover_idr_add(struct dlm_rsb *r)
struct dlm_ls *ls = r->res_ls;
int rv;
- idr_preload(GFP_NOFS);
- spin_lock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_idr_lock);
if (r->res_id) {
rv = -1;
goto out_unlock;
@@ -325,8 +324,7 @@ static int recover_idr_add(struct dlm_rsb *r)
dlm_hold_rsb(r);
rv = 0;
out_unlock:
- spin_unlock(&ls->ls_recover_idr_lock);
- idr_preload_end();
+ spin_unlock_bh(&ls->ls_recover_idr_lock);
return rv;
}
@@ -334,11 +332,11 @@ static void recover_idr_del(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
- spin_lock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_idr_lock);
idr_remove(&ls->ls_recover_idr, r->res_id);
r->res_id = 0;
ls->ls_recover_list_count--;
- spin_unlock(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_idr_lock);
dlm_put_rsb(r);
}
@@ -347,9 +345,9 @@ static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
struct dlm_rsb *r;
- spin_lock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_idr_lock);
r = idr_find(&ls->ls_recover_idr, (int)id);
- spin_unlock(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_idr_lock);
return r;
}
@@ -358,7 +356,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
struct dlm_rsb *r;
int id;
- spin_lock(&ls->ls_recover_idr_lock);
+ spin_lock_bh(&ls->ls_recover_idr_lock);
idr_for_each_entry(&ls->ls_recover_idr, r, id) {
idr_remove(&ls->ls_recover_idr, id);
@@ -374,7 +372,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
ls->ls_recover_list_count);
ls->ls_recover_list_count = 0;
}
- spin_unlock(&ls->ls_recover_idr_lock);
+ spin_unlock_bh(&ls->ls_recover_idr_lock);
}
@@ -521,7 +519,8 @@ static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
* the correct dir node.
*/
-int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq)
+int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
+ const struct list_head *root_list)
{
struct dlm_rsb *r;
unsigned int total = 0;
@@ -531,10 +530,8 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq)
log_rinfo(ls, "dlm_recover_masters");
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, root_list, res_root_list) {
if (dlm_recovery_stopped(ls)) {
- up_read(&ls->ls_root_sem);
error = -EINTR;
goto out;
}
@@ -548,12 +545,9 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq)
cond_resched();
total++;
- if (error) {
- up_read(&ls->ls_root_sem);
+ if (error)
goto out;
- }
}
- up_read(&ls->ls_root_sem);
log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);
@@ -658,13 +652,13 @@ static int recover_locks(struct dlm_rsb *r, uint64_t seq)
return error;
}
-int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq)
+int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
+ const struct list_head *root_list)
{
struct dlm_rsb *r;
int error, count = 0;
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, root_list, res_root_list) {
if (is_master(r)) {
rsb_clear_flag(r, RSB_NEW_MASTER);
continue;
@@ -675,19 +669,15 @@ int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq)
if (dlm_recovery_stopped(ls)) {
error = -EINTR;
- up_read(&ls->ls_root_sem);
goto out;
}
error = recover_locks(r, seq);
- if (error) {
- up_read(&ls->ls_root_sem);
+ if (error)
goto out;
- }
count += r->res_recover_locks_count;
}
- up_read(&ls->ls_root_sem);
log_rinfo(ls, "dlm_recover_locks %d out", count);
@@ -856,13 +846,12 @@ static void recover_grant(struct dlm_rsb *r)
rsb_set_flag(r, RSB_RECOVER_GRANT);
}
-void dlm_recover_rsbs(struct dlm_ls *ls)
+void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list)
{
struct dlm_rsb *r;
unsigned int count = 0;
- down_read(&ls->ls_root_sem);
- list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
+ list_for_each_entry(r, root_list, res_root_list) {
lock_rsb(r);
if (is_master(r)) {
if (rsb_flag(r, RSB_RECOVER_CONVERT))
@@ -883,7 +872,6 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
rsb_clear_flag(r, RSB_NEW_MASTER2);
unlock_rsb(r);
}
- up_read(&ls->ls_root_sem);
if (count)
log_rinfo(ls, "dlm_recover_rsbs %d done", count);
@@ -891,66 +879,25 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
/* Create a single list of all root rsb's to be used during recovery */
-int dlm_create_root_list(struct dlm_ls *ls)
-{
- struct rb_node *n;
- struct dlm_rsb *r;
- int i, error = 0;
-
- down_write(&ls->ls_root_sem);
- if (!list_empty(&ls->ls_root_list)) {
- log_error(ls, "root list not empty");
- error = -EINVAL;
- goto out;
- }
-
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- spin_lock(&ls->ls_rsbtbl[i].lock);
- for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
- list_add(&r->res_root_list, &ls->ls_root_list);
- dlm_hold_rsb(r);
- }
-
- if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
- log_error(ls, "dlm_create_root_list toss not empty");
- spin_unlock(&ls->ls_rsbtbl[i].lock);
- }
- out:
- up_write(&ls->ls_root_sem);
- return error;
-}
-
-void dlm_release_root_list(struct dlm_ls *ls)
+void dlm_clear_toss(struct dlm_ls *ls)
{
struct dlm_rsb *r, *safe;
+ unsigned int count = 0;
- down_write(&ls->ls_root_sem);
- list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
- list_del_init(&r->res_root_list);
- dlm_put_rsb(r);
- }
- up_write(&ls->ls_root_sem);
-}
+ write_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) {
+ list_del(&r->res_rsbs_list);
+ rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
+ dlm_rhash_rsb_params);
-void dlm_clear_toss(struct dlm_ls *ls)
-{
- struct rb_node *n, *next;
- struct dlm_rsb *r;
- unsigned int count = 0;
- int i;
-
- for (i = 0; i < ls->ls_rsbtbl_size; i++) {
- spin_lock(&ls->ls_rsbtbl[i].lock);
- for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
- next = rb_next(n);
- r = rb_entry(n, struct dlm_rsb, res_hashnode);
- rb_erase(n, &ls->ls_rsbtbl[i].toss);
- dlm_free_rsb(r);
- count++;
- }
- spin_unlock(&ls->ls_rsbtbl[i].lock);
+ /* remove it from the toss queue if it's part of it */
+ if (!list_empty(&r->res_toss_q_list))
+ list_del_init(&r->res_toss_q_list);
+
+ free_toss_rsb(r);
+ count++;
}
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
if (count)
log_rinfo(ls, "dlm_clear_toss %u done", count);
diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h
index dbc51013ecad..efc79a6e577d 100644
--- a/fs/dlm/recover.h
+++ b/fs/dlm/recover.h
@@ -19,14 +19,14 @@ int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq);
int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq);
int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq);
int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq);
-int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq);
+int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
+ const struct list_head *root_list);
int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc);
-int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq);
+int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
+ const struct list_head *root_list);
void dlm_recovered_lock(struct dlm_rsb *r);
-int dlm_create_root_list(struct dlm_ls *ls);
-void dlm_release_root_list(struct dlm_ls *ls);
void dlm_clear_toss(struct dlm_ls *ls);
-void dlm_recover_rsbs(struct dlm_ls *ls);
+void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list);
#endif /* __RECOVER_DOT_H__ */
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 4d17491dea2f..17a40d1e6036 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -20,6 +20,67 @@
#include "requestqueue.h"
#include "recoverd.h"
+static int dlm_create_masters_list(struct dlm_ls *ls)
+{
+ struct dlm_rsb *r;
+ int error = 0;
+
+ write_lock_bh(&ls->ls_masters_lock);
+ if (!list_empty(&ls->ls_masters_list)) {
+ log_error(ls, "root list not empty");
+ error = -EINVAL;
+ goto out;
+ }
+
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+ if (r->res_nodeid)
+ continue;
+
+ list_add(&r->res_masters_list, &ls->ls_masters_list);
+ dlm_hold_rsb(r);
+ }
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+ out:
+ write_unlock_bh(&ls->ls_masters_lock);
+ return error;
+}
+
+static void dlm_release_masters_list(struct dlm_ls *ls)
+{
+ struct dlm_rsb *r, *safe;
+
+ write_lock_bh(&ls->ls_masters_lock);
+ list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) {
+ list_del_init(&r->res_masters_list);
+ dlm_put_rsb(r);
+ }
+ write_unlock_bh(&ls->ls_masters_lock);
+}
+
+static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
+{
+ struct dlm_rsb *r;
+
+ read_lock_bh(&ls->ls_rsbtbl_lock);
+ list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+ list_add(&r->res_root_list, root_list);
+ dlm_hold_rsb(r);
+ }
+
+ WARN_ON_ONCE(!list_empty(&ls->ls_toss));
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
+}
+
+static void dlm_release_root_list(struct list_head *root_list)
+{
+ struct dlm_rsb *r, *safe;
+
+ list_for_each_entry_safe(r, safe, root_list, res_root_list) {
+ list_del_init(&r->res_root_list);
+ dlm_put_rsb(r);
+ }
+}
/* If the start for which we're re-enabling locking (seq) has been superseded
by a newer stop (ls_recover_seq), we need to leave locking disabled.
@@ -32,24 +93,35 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
int error = -EINTR;
- down_write(&ls->ls_recv_active);
+ write_lock_bh(&ls->ls_recv_active);
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
if (ls->ls_recover_seq == seq) {
set_bit(LSFL_RUNNING, &ls->ls_flags);
+ /* Schedule the next timer if recovery put something on toss.
+ *
+ * The toss timer for rsbs queued during recovery hasn't
+ * started yet because LSFL_RUNNING was not set, and
+ * everything else in recovery hasn't started either because
+ * ls_in_recovery is still held. So we should not run into
+ * the case that dlm_timer_resume() queues a timer that ends
+ * up being a no-op.
+ */
+ dlm_timer_resume(ls);
/* unblocks processes waiting to enter the dlm */
up_write(&ls->ls_in_recovery);
clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
error = 0;
}
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
- up_write(&ls->ls_recv_active);
+ write_unlock_bh(&ls->ls_recv_active);
return error;
}
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
+ LIST_HEAD(root_list);
unsigned long start;
int error, neg = 0;
@@ -66,7 +138,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
* routines.
*/
- dlm_create_root_list(ls);
+ dlm_create_root_list(ls, &root_list);
/*
* Add or remove nodes from the lockspace's ls_nodes list.
@@ -82,10 +154,25 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
goto fail;
}
- dlm_recover_dir_nodeid(ls);
+ dlm_recover_dir_nodeid(ls, &root_list);
+
+ /* Create a snapshot of all active rsbs that we are the master of.
+ * During the barrier between dlm_recover_members_wait() and
+ * dlm_recover_directory(), other nodes can dump the directory
+ * dlm_rsbs they need (r->res_dir_nodeid == nodeid) via the rcom
+ * communication handled in dlm_copy_master_names().
+ *
+ * TODO: We should keep a per-lockspace list of the rsbs that we
+ * are the master of. Instead of creating this list during
+ * recovery, we would track those rsbs during normal locking
+ * handling, and recovery could use the list when necessary.
+ */
+ error = dlm_create_masters_list(ls);
+ if (error) {
+ log_rinfo(ls, "dlm_create_masters_list error %d", error);
+ goto fail_root_list;
+ }
- ls->ls_recover_dir_sent_res = 0;
- ls->ls_recover_dir_sent_msg = 0;
ls->ls_recover_locks_in = 0;
dlm_set_recover_status(ls, DLM_RS_NODES);
@@ -93,7 +180,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_members_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_members_wait error %d", error);
- goto fail;
+ dlm_release_masters_list(ls);
+ goto fail_root_list;
}
start = jiffies;
@@ -106,7 +194,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_directory(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_directory error %d", error);
- goto fail;
+ dlm_release_masters_list(ls);
+ goto fail_root_list;
}
dlm_set_recover_status(ls, DLM_RS_DIR);
@@ -114,11 +203,11 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_directory_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_directory_wait error %d", error);
- goto fail;
+ dlm_release_masters_list(ls);
+ goto fail_root_list;
}
- log_rinfo(ls, "dlm_recover_directory %u out %u messages",
- ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg);
+ dlm_release_masters_list(ls);
/*
* We may have outstanding operations that are waiting for a reply from
@@ -130,7 +219,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
if (dlm_recovery_stopped(ls)) {
error = -EINTR;
- goto fail;
+ goto fail_root_list;
}
if (neg || dlm_no_directory(ls)) {
@@ -138,27 +227,27 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
* Clear lkb's for departed nodes.
*/
- dlm_recover_purge(ls);
+ dlm_recover_purge(ls, &root_list);
/*
* Get new master nodeid's for rsb's that were mastered on
* departed nodes.
*/
- error = dlm_recover_masters(ls, rv->seq);
+ error = dlm_recover_masters(ls, rv->seq, &root_list);
if (error) {
log_rinfo(ls, "dlm_recover_masters error %d", error);
- goto fail;
+ goto fail_root_list;
}
/*
* Send our locks on remastered rsb's to the new masters.
*/
- error = dlm_recover_locks(ls, rv->seq);
+ error = dlm_recover_locks(ls, rv->seq, &root_list);
if (error) {
log_rinfo(ls, "dlm_recover_locks error %d", error);
- goto fail;
+ goto fail_root_list;
}
dlm_set_recover_status(ls, DLM_RS_LOCKS);
@@ -166,7 +255,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_locks_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
- goto fail;
+ goto fail_root_list;
}
log_rinfo(ls, "dlm_recover_locks %u in",
@@ -178,7 +267,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
* settings.
*/
- dlm_recover_rsbs(ls);
+ dlm_recover_rsbs(ls, &root_list);
} else {
/*
* Other lockspace members may be going through the "neg" steps
@@ -190,11 +279,11 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
error = dlm_recover_locks_wait(ls, rv->seq);
if (error) {
log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
- goto fail;
+ goto fail_root_list;
}
}
- dlm_release_root_list(ls);
+ dlm_release_root_list(&root_list);
/*
* Purge directory-related requests that are saved in requestqueue.
@@ -243,8 +332,9 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
return 0;
+ fail_root_list:
+ dlm_release_root_list(&root_list);
fail:
- dlm_release_root_list(ls);
mutex_unlock(&ls->ls_recoverd_active);
return error;
@@ -259,12 +349,12 @@ static void do_ls_recovery(struct dlm_ls *ls)
struct dlm_recover *rv = NULL;
int error;
- spin_lock(&ls->ls_recover_lock);
+ spin_lock_bh(&ls->ls_recover_lock);
rv = ls->ls_recover_args;
ls->ls_recover_args = NULL;
if (rv && ls->ls_recover_seq == rv->seq)
clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
- spin_unlock(&ls->ls_recover_lock);
+ spin_unlock_bh(&ls->ls_recover_lock);
if (rv) {
error = ls_recover(ls, rv);
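[Editorial sketch, not part of the patch: dlm_create_root_list()/dlm_release_root_list() above now build a caller-owned snapshot under a short read_lock_bh() section, pinning each rsb with a reference so the list can be walked later without any lock, then drop the pins. A minimal illustration of that snapshot-and-pin pattern, with hypothetical types:]

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_rsb {
	struct kref ref;
	struct list_head keep_list;	/* lives on the per-ls keep list */
	struct list_head snap_list;	/* linkage for the snapshot */
};

/* build a snapshot under the read lock, pinning each entry */
static void demo_snapshot(rwlock_t *lock, struct list_head *keep,
			  struct list_head *snap)
{
	struct demo_rsb *r;

	read_lock_bh(lock);
	list_for_each_entry(r, keep, keep_list) {
		kref_get(&r->ref);		/* like dlm_hold_rsb() */
		list_add(&r->snap_list, snap);
	}
	read_unlock_bh(lock);
}

/* walk the snapshot lock-free elsewhere, then drop the pins */
static void demo_release(struct list_head *snap,
			 void (*put)(struct demo_rsb *r))
{
	struct demo_rsb *r, *safe;

	list_for_each_entry_safe(r, safe, snap, snap_list) {
		list_del_init(&r->snap_list);
		put(r);				/* like dlm_put_rsb() */
	}
}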
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index 892d6ca21e74..719a5243a069 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -37,7 +37,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid,
int length = le16_to_cpu(ms->m_header.h_length) -
sizeof(struct dlm_message);
- e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
+ e = kmalloc(sizeof(struct rq_entry) + length, GFP_ATOMIC);
if (!e) {
log_print("dlm_add_requestqueue: out of memory len %d", length);
return;
@@ -48,10 +48,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid,
memcpy(&e->request, ms, sizeof(*ms));
memcpy(&e->request.m_extra, ms->m_extra, length);
- atomic_inc(&ls->ls_requestqueue_cnt);
- mutex_lock(&ls->ls_requestqueue_mutex);
list_add_tail(&e->list, &ls->ls_requestqueue);
- mutex_unlock(&ls->ls_requestqueue_mutex);
}
/*
@@ -71,16 +68,14 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
struct dlm_message *ms;
int error = 0;
- mutex_lock(&ls->ls_requestqueue_mutex);
-
+ write_lock_bh(&ls->ls_requestqueue_lock);
for (;;) {
if (list_empty(&ls->ls_requestqueue)) {
- mutex_unlock(&ls->ls_requestqueue_mutex);
+ clear_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
error = 0;
break;
}
- e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
- mutex_unlock(&ls->ls_requestqueue_mutex);
+ e = list_first_entry(&ls->ls_requestqueue, struct rq_entry, list);
ms = &e->request;
@@ -93,41 +88,23 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
e->recover_seq);
dlm_receive_message_saved(ls, &e->request, e->recover_seq);
-
- mutex_lock(&ls->ls_requestqueue_mutex);
list_del(&e->list);
- if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
- wake_up(&ls->ls_requestqueue_wait);
kfree(e);
if (dlm_locking_stopped(ls)) {
log_debug(ls, "process_requestqueue abort running");
- mutex_unlock(&ls->ls_requestqueue_mutex);
error = -EINTR;
break;
}
+ write_unlock_bh(&ls->ls_requestqueue_lock);
schedule();
+ write_lock_bh(&ls->ls_requestqueue_lock);
}
+ write_unlock_bh(&ls->ls_requestqueue_lock);
return error;
}
-/*
- * After recovery is done, locking is resumed and dlm_recoverd takes all the
- * saved requests and processes them as they would have been by dlm_recv. At
- * the same time, dlm_recv will start receiving new requests from remote nodes.
- * We want to delay dlm_recv processing new requests until dlm_recoverd has
- * finished processing the old saved requests. We don't check for locking
- * stopped here because dlm_ls_stop won't stop locking until it's suspended us
- * (dlm_recv).
- */
-
-void dlm_wait_requestqueue(struct dlm_ls *ls)
-{
- wait_event(ls->ls_requestqueue_wait,
- atomic_read(&ls->ls_requestqueue_cnt) == 0);
-}
-
static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
__le32 type = ms->m_type;
@@ -158,17 +135,15 @@ void dlm_purge_requestqueue(struct dlm_ls *ls)
struct dlm_message *ms;
struct rq_entry *e, *safe;
- mutex_lock(&ls->ls_requestqueue_mutex);
+ write_lock_bh(&ls->ls_requestqueue_lock);
list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
ms = &e->request;
if (purge_request(ls, ms, e->nodeid)) {
list_del(&e->list);
- if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
- wake_up(&ls->ls_requestqueue_wait);
kfree(e);
}
}
- mutex_unlock(&ls->ls_requestqueue_mutex);
+ write_unlock_bh(&ls->ls_requestqueue_lock);
}
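[Editorial sketch, not part of the patch: dlm_process_requestqueue() above now holds ls_requestqueue_lock as a _bh write lock across the drain loop, so it must drop the lock before schedule() since sleeping with bottom halves disabled is not allowed. The shape of that loop, reduced to its essentials with hypothetical names:]

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head list;
};

static void drain(rwlock_t *lock, struct list_head *queue,
		  void (*handle)(struct demo_entry *e))
{
	struct demo_entry *e;

	write_lock_bh(lock);
	while (!list_empty(queue)) {
		e = list_first_entry(queue, struct demo_entry, list);
		handle(e);		/* process one saved message */
		list_del(&e->list);

		/* sleeping with BHs disabled is a bug: drop the lock,
		 * give others a chance to run, then retake it */
		write_unlock_bh(lock);
		schedule();
		write_lock_bh(lock);
	}
	write_unlock_bh(lock);
}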
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 9f9b68448830..3173b974e8c8 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -145,24 +145,6 @@ static void compat_output(struct dlm_lock_result *res,
}
#endif
-/* should held proc->asts_spin lock */
-void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
-{
- struct dlm_callback *cb, *safe;
-
- list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
- list_del(&cb->list);
- kref_put(&cb->ref, dlm_release_callback);
- }
-
- clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
-
- /* invalidate */
- dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
- dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
- lkb->lkb_last_bast_mode = -1;
-}
-
/* Figure out if this lock is at the end of its life and no longer
available for the application to use. The lkb still exists until
the final ast is read. A lock becomes EOL in three situations:
@@ -199,6 +181,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
struct dlm_ls *ls;
struct dlm_user_args *ua;
struct dlm_user_proc *proc;
+ struct dlm_callback *cb;
int rv;
if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
@@ -206,7 +189,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
return;
ls = lkb->lkb_resource->res_ls;
- spin_lock(&ls->ls_clear_proc_locks);
+ spin_lock_bh(&ls->ls_clear_proc_locks);
/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
@@ -228,38 +211,44 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags);
- spin_lock(&proc->asts_spin);
+ spin_lock_bh(&proc->asts_spin);
- rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
+ rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb);
switch (rv) {
- case DLM_ENQUEUE_CALLBACK_FAILURE:
- spin_unlock(&proc->asts_spin);
- WARN_ON_ONCE(1);
- goto out;
case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
- kref_get(&lkb->lkb_ref);
- list_add_tail(&lkb->lkb_cb_list, &proc->asts);
+ cb->ua = *ua;
+ cb->lkb_lksb = &cb->ua.lksb;
+ if (cb->copy_lvb) {
+ memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
+ DLM_USER_LVB_LEN);
+ cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
+ }
+
+ list_add_tail(&cb->list, &proc->asts);
wake_up_interruptible(&proc->wait);
break;
case DLM_ENQUEUE_CALLBACK_SUCCESS:
break;
+ case DLM_ENQUEUE_CALLBACK_FAILURE:
+ fallthrough;
default:
+ spin_unlock_bh(&proc->asts_spin);
WARN_ON_ONCE(1);
- break;
+ goto out;
}
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) {
/* N.B. spin_lock locks_spin, not asts_spin */
- spin_lock(&proc->locks_spin);
+ spin_lock_bh(&proc->locks_spin);
if (!list_empty(&lkb->lkb_ownqueue)) {
list_del_init(&lkb->lkb_ownqueue);
dlm_put_lkb(lkb);
}
- spin_unlock(&proc->locks_spin);
+ spin_unlock_bh(&proc->locks_spin);
}
out:
- spin_unlock(&ls->ls_clear_proc_locks);
+ spin_unlock_bh(&ls->ls_clear_proc_locks);
}
static int device_user_lock(struct dlm_user_proc *proc,
@@ -803,11 +792,9 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct dlm_user_proc *proc = file->private_data;
- struct dlm_lkb *lkb;
DECLARE_WAITQUEUE(wait, current);
struct dlm_callback *cb;
- int rv, ret, copy_lvb = 0;
- int old_mode, new_mode;
+ int rv, ret;
if (count == sizeof(struct dlm_device_version)) {
rv = copy_version_to_user(buf, count);
@@ -826,16 +813,14 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
#endif
return -EINVAL;
- try_another:
-
/* do we really need this? can a read happen after a close? */
if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
return -EINVAL;
- spin_lock(&proc->asts_spin);
+ spin_lock_bh(&proc->asts_spin);
if (list_empty(&proc->asts)) {
if (file->f_flags & O_NONBLOCK) {
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
return -EAGAIN;
}
@@ -844,16 +829,16 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
repeat:
set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&proc->asts) && !signal_pending(current)) {
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
schedule();
- spin_lock(&proc->asts_spin);
+ spin_lock_bh(&proc->asts_spin);
goto repeat;
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&proc->wait, &wait);
if (signal_pending(current)) {
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
return -ERESTARTSYS;
}
}
@@ -862,60 +847,24 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
without removing lkb_cb_list; so empty lkb_cb_list is always
consistent with empty lkb_callbacks */
- lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
-
- /* rem_lkb_callback sets a new lkb_last_cast */
- old_mode = lkb->lkb_last_cast->mode;
-
- rv = dlm_dequeue_lkb_callback(lkb, &cb);
- switch (rv) {
- case DLM_DEQUEUE_CALLBACK_EMPTY:
- /* this shouldn't happen; lkb should have been removed from
- * list when last item was dequeued
- */
- log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
- list_del_init(&lkb->lkb_cb_list);
- spin_unlock(&proc->asts_spin);
- /* removes ref for proc->asts, may cause lkb to be freed */
- dlm_put_lkb(lkb);
- WARN_ON_ONCE(1);
- goto try_another;
- case DLM_DEQUEUE_CALLBACK_LAST:
- list_del_init(&lkb->lkb_cb_list);
- clear_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags);
- break;
- case DLM_DEQUEUE_CALLBACK_SUCCESS:
- break;
- default:
- WARN_ON_ONCE(1);
- break;
- }
- spin_unlock(&proc->asts_spin);
+ cb = list_first_entry(&proc->asts, struct dlm_callback, list);
+ list_del(&cb->list);
+ spin_unlock_bh(&proc->asts_spin);
if (cb->flags & DLM_CB_BAST) {
- trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
+ trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
+ cb->res_length);
} else if (cb->flags & DLM_CB_CAST) {
- new_mode = cb->mode;
-
- if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
- dlm_lvb_operations[old_mode + 1][new_mode + 1])
- copy_lvb = 1;
-
- lkb->lkb_lksb->sb_status = cb->sb_status;
- lkb->lkb_lksb->sb_flags = cb->sb_flags;
- trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
+ cb->lkb_lksb->sb_status = cb->sb_status;
+ cb->lkb_lksb->sb_flags = cb->sb_flags;
+ trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
+ cb->sb_flags, cb->res_name, cb->res_length);
}
- ret = copy_result_to_user(lkb->lkb_ua,
+ ret = copy_result_to_user(&cb->ua,
test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
- cb->flags, cb->mode, copy_lvb, buf, count);
-
- kref_put(&cb->ref, dlm_release_callback);
-
- /* removes ref for proc->asts, may cause lkb to be freed */
- if (rv == DLM_DEQUEUE_CALLBACK_LAST)
- dlm_put_lkb(lkb);
-
+ cb->flags, cb->mode, cb->copy_lvb, buf, count);
+ dlm_free_cb(cb);
return ret;
}
@@ -925,12 +874,12 @@ static __poll_t device_poll(struct file *file, poll_table *wait)
poll_wait(file, &proc->wait, wait);
- spin_lock(&proc->asts_spin);
+ spin_lock_bh(&proc->asts_spin);
if (!list_empty(&proc->asts)) {
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
return EPOLLIN | EPOLLRDNORM;
}
- spin_unlock(&proc->asts_spin);
+ spin_unlock_bh(&proc->asts_spin);
return 0;
}
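
The switch to the _bh lock variants above reflects that the callback producers can now run in softirq context; taking the same spinlock from process context without disabling bottom halves would risk deadlock. A minimal sketch of the pattern (the struct and function names here are illustrative, not the dlm API):

	struct item {
		struct list_head list;
	};

	static DEFINE_SPINLOCK(q_lock);
	static LIST_HEAD(q);

	/* process-context consumer: bottom halves stay disabled while the
	 * lock is held, so a softirq producer cannot deadlock against us */
	static struct item *pop_item(void)
	{
		struct item *it = NULL;

		spin_lock_bh(&q_lock);
		if (!list_empty(&q)) {
			it = list_first_entry(&q, struct item, list);
			list_del(&it->list);
		}
		spin_unlock_bh(&q_lock);
		return it;
	}
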
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 2fe0f3af1a08..d39a1a69fecc 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1606,9 +1606,7 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
goto out;
}
mutex_init(&tmp_tfm->key_tfm_mutex);
- strncpy(tmp_tfm->cipher_name, cipher_name,
- ECRYPTFS_MAX_CIPHER_NAME_SIZE);
- tmp_tfm->cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
+ strscpy(tmp_tfm->cipher_name, cipher_name);
tmp_tfm->key_size = key_size;
rc = ecryptfs_process_key_cipher(&tmp_tfm->key_tfm,
tmp_tfm->cipher_name,
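
The strscpy() conversions in this and the following hunks rely on two properties strncpy() lacks: the result is always NUL-terminated, and the two-argument form derives the bound from the destination array at compile time. A hedged before/after illustration (dst and src are made-up names):

	char dst[16];

	/* old pattern: strncpy() may leave dst unterminated, so a manual
	 * terminator write was required after every copy */
	strncpy(dst, src, sizeof(dst) - 1);
	dst[sizeof(dst) - 1] = '\0';

	/* new pattern: truncates safely and always NUL-terminates */
	strscpy(dst, src);
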
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3fe41964c0d8..7f9f68c00ef6 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -300,9 +300,11 @@ write_tag_66_packet(char *signature, u8 cipher_code,
* | Key Identifier Size | 1 or 2 bytes |
* | Key Identifier | arbitrary |
* | File Encryption Key Size | 1 or 2 bytes |
+ * | Cipher Code | 1 byte |
* | File Encryption Key | arbitrary |
+ * | Checksum | 2 bytes |
*/
- data_len = (5 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
+ data_len = (8 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
*packet = kmalloc(data_len, GFP_KERNEL);
message = *packet;
if (!message) {
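
For the data_len bump above: assuming both size fields use their maximum 2-byte encoding, the fixed overhead is 1 (packet type) + 2 (key identifier size) + 2 (file encryption key size) + 1 (cipher code) + 2 (checksum) = 8 bytes; the old constant 5 predates the cipher code and checksum fields now documented in the layout comment. The variable parts, ECRYPTFS_SIG_SIZE_HEX and crypt_stat->key_size, are added on top.
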
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 2dc927ba067f..577c56302314 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -256,11 +256,8 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
substring_t args[MAX_OPT_ARGS];
int token;
char *sig_src;
- char *cipher_name_dst;
char *cipher_name_src;
- char *fn_cipher_name_dst;
char *fn_cipher_name_src;
- char *fnek_dst;
char *fnek_src;
char *cipher_key_bytes_src;
char *fn_cipher_key_bytes_src;
@@ -293,12 +290,8 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
case ecryptfs_opt_cipher:
case ecryptfs_opt_ecryptfs_cipher:
cipher_name_src = args[0].from;
- cipher_name_dst =
- mount_crypt_stat->
- global_default_cipher_name;
- strncpy(cipher_name_dst, cipher_name_src,
- ECRYPTFS_MAX_CIPHER_NAME_SIZE);
- cipher_name_dst[ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
+ strscpy(mount_crypt_stat->global_default_cipher_name,
+ cipher_name_src);
cipher_name_set = 1;
break;
case ecryptfs_opt_ecryptfs_key_bytes:
@@ -326,11 +319,8 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
break;
case ecryptfs_opt_fnek_sig:
fnek_src = args[0].from;
- fnek_dst =
- mount_crypt_stat->global_default_fnek_sig;
- strncpy(fnek_dst, fnek_src, ECRYPTFS_SIG_SIZE_HEX);
- mount_crypt_stat->global_default_fnek_sig[
- ECRYPTFS_SIG_SIZE_HEX] = '\0';
+ strscpy(mount_crypt_stat->global_default_fnek_sig,
+ fnek_src);
rc = ecryptfs_add_global_auth_tok(
mount_crypt_stat,
mount_crypt_stat->global_default_fnek_sig,
@@ -348,12 +338,8 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options,
break;
case ecryptfs_opt_fn_cipher:
fn_cipher_name_src = args[0].from;
- fn_cipher_name_dst =
- mount_crypt_stat->global_default_fn_cipher_name;
- strncpy(fn_cipher_name_dst, fn_cipher_name_src,
- ECRYPTFS_MAX_CIPHER_NAME_SIZE);
- mount_crypt_stat->global_default_fn_cipher_name[
- ECRYPTFS_MAX_CIPHER_NAME_SIZE] = '\0';
+ strscpy(mount_crypt_stat->global_default_fn_cipher_name,
+ fn_cipher_name_src);
fn_cipher_name_set = 1;
break;
case ecryptfs_opt_fn_cipher_key_bytes:
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index f7206158ee81..d71d2e08422f 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -24,11 +24,8 @@ struct efivarfs_fs_info {
struct efi_variable {
efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
efi_guid_t VendorGuid;
- unsigned long DataSize;
- __u8 Data[1024];
- efi_status_t Status;
__u32 Attributes;
-} __attribute__((packed));
+};
struct efivar_entry {
struct efi_variable var;
diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c
index 4d722af1014f..3cc89bb624f0 100644
--- a/fs/efivarfs/vars.c
+++ b/fs/efivarfs/vars.c
@@ -295,9 +295,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
unsigned long strsize1, strsize2;
bool found = false;
- strsize1 = ucs2_strsize(variable_name, 1024);
+ strsize1 = ucs2_strsize(variable_name, EFI_VAR_NAME_LEN);
list_for_each_entry_safe(entry, n, head, list) {
- strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
+ strsize2 = ucs2_strsize(entry->var.VariableName, EFI_VAR_NAME_LEN);
if (strsize1 == strsize2 &&
!memcmp(variable_name, &(entry->var.VariableName),
strsize2) &&
@@ -396,6 +396,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *,
do {
variable_name_size = 512;
+ BUILD_BUG_ON(EFI_VAR_NAME_LEN < 512);
status = efivar_get_next_variable(&variable_name_size,
variable_name,
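
The new BUILD_BUG_ON() ties the longstanding hardcoded 512-byte request to the buffer it fills: if EFI_VAR_NAME_LEN were ever reduced below 512, compilation would fail instead of permitting an overflow of VariableName. Functionally it is equivalent to a static assertion:

	/* compile-time guard for the 512-byte GetNextVariableName() buffer */
	static_assert(EFI_VAR_NAME_LEN >= 512);
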
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index fffd3919343e..7dcdce660cac 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -112,6 +112,21 @@ config EROFS_FS_ZIP_DEFLATE
If unsure, say N.
+config EROFS_FS_ZIP_ZSTD
+ bool "EROFS Zstandard compressed data support"
+ depends on EROFS_FS_ZIP
+ select ZSTD_DECOMPRESS
+ help
+ Saying Y here includes support for reading EROFS file systems
+ containing Zstandard compressed data. It gives better compression
+	  ratios than the default LZ4 format, at the cost of higher CPU
+	  overhead.
+
+	  Zstandard support is currently experimental, so most existing
+	  file systems remain readable without selecting this option.
+
+ If unsure, say N.
+
config EROFS_FS_ONDEMAND
bool "EROFS fscache-based on-demand read support"
depends on EROFS_FS
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
index 994d0b9deddf..097d672e6b14 100644
--- a/fs/erofs/Makefile
+++ b/fs/erofs/Makefile
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_EROFS_FS) += erofs.o
-erofs-objs := super.o inode.o data.o namei.o dir.o utils.o sysfs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o sysfs.o
erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
-erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o
erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o
+erofs-$(CONFIG_EROFS_FS_ZIP_ZSTD) += decompressor_zstd.o
erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 333587ba6183..19d53c30c8af 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -90,8 +90,12 @@ int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size);
int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size);
+int z_erofs_load_zstd_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size);
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool);
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool);
+int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl);
#endif
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 2ec9b2bb628d..9d85b6c11c6b 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -54,7 +54,7 @@ static int z_erofs_load_lz4_config(struct super_block *sb,
sbi->lz4.max_distance_pages = distance ?
DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
LZ4_MAX_DISTANCE_PAGES;
- return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
+ return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}
/*
@@ -111,7 +111,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
victim = availables[--top];
get_page(victim);
} else {
- victim = erofs_allocpage(pagepool, rq->gfp);
+ victim = __erofs_allocpage(pagepool, rq->gfp, true);
if (!victim)
return -ENOMEM;
set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
@@ -159,7 +159,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
docopy:
/* Or copy compressed data which can be overlapped to per-CPU buffer */
in = rq->in;
- src = erofs_get_pcpubuf(ctx->inpages);
+ src = z_erofs_get_gbuf(ctx->inpages);
if (!src) {
DBG_BUGON(1);
kunmap_local(inpage);
@@ -260,7 +260,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
} else if (maptype == 1) {
vm_unmap_ram(src, ctx->inpages);
} else if (maptype == 2) {
- erofs_put_pcpubuf(src);
+ z_erofs_put_gbuf(src);
} else if (maptype != 3) {
DBG_BUGON(1);
return -EFAULT;
@@ -399,6 +399,13 @@ const struct z_erofs_decompressor erofs_decompressors[] = {
.name = "deflate"
},
#endif
+#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
+ [Z_EROFS_COMPRESSION_ZSTD] = {
+ .config = z_erofs_load_zstd_config,
+ .decompress = z_erofs_zstd_decompress,
+ .name = "zstd"
+ },
+#endif
};
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c
new file mode 100644
index 000000000000..63a23cac3af4
--- /dev/null
+++ b/fs/erofs/decompressor_zstd.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/zstd.h>
+#include "compress.h"
+
+struct z_erofs_zstd {
+ struct z_erofs_zstd *next;
+ u8 bounce[PAGE_SIZE];
+ void *wksp;
+ unsigned int wkspsz;
+};
+
+static DEFINE_SPINLOCK(z_erofs_zstd_lock);
+static unsigned int z_erofs_zstd_max_dictsize;
+static unsigned int z_erofs_zstd_nstrms, z_erofs_zstd_avail_strms;
+static struct z_erofs_zstd *z_erofs_zstd_head;
+static DECLARE_WAIT_QUEUE_HEAD(z_erofs_zstd_wq);
+
+module_param_named(zstd_streams, z_erofs_zstd_nstrms, uint, 0444);
+
+static struct z_erofs_zstd *z_erofs_isolate_strms(bool all)
+{
+ struct z_erofs_zstd *strm;
+
+again:
+ spin_lock(&z_erofs_zstd_lock);
+ strm = z_erofs_zstd_head;
+ if (!strm) {
+ spin_unlock(&z_erofs_zstd_lock);
+ wait_event(z_erofs_zstd_wq, READ_ONCE(z_erofs_zstd_head));
+ goto again;
+ }
+ z_erofs_zstd_head = all ? NULL : strm->next;
+ spin_unlock(&z_erofs_zstd_lock);
+ return strm;
+}
+
+void z_erofs_zstd_exit(void)
+{
+ while (z_erofs_zstd_avail_strms) {
+ struct z_erofs_zstd *strm, *n;
+
+ for (strm = z_erofs_isolate_strms(true); strm; strm = n) {
+ n = strm->next;
+
+ kvfree(strm->wksp);
+ kfree(strm);
+ --z_erofs_zstd_avail_strms;
+ }
+ }
+}
+
+int __init z_erofs_zstd_init(void)
+{
+	/* if unset, default to the number of possible CPUs */
+ if (!z_erofs_zstd_nstrms)
+ z_erofs_zstd_nstrms = num_possible_cpus();
+
+ for (; z_erofs_zstd_avail_strms < z_erofs_zstd_nstrms;
+ ++z_erofs_zstd_avail_strms) {
+ struct z_erofs_zstd *strm;
+
+ strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+ if (!strm) {
+ z_erofs_zstd_exit();
+ return -ENOMEM;
+ }
+ spin_lock(&z_erofs_zstd_lock);
+ strm->next = z_erofs_zstd_head;
+ z_erofs_zstd_head = strm;
+ spin_unlock(&z_erofs_zstd_lock);
+ }
+ return 0;
+}
+
+int z_erofs_load_zstd_config(struct super_block *sb,
+ struct erofs_super_block *dsb, void *data, int size)
+{
+ static DEFINE_MUTEX(zstd_resize_mutex);
+ struct z_erofs_zstd_cfgs *zstd = data;
+ unsigned int dict_size, wkspsz;
+ struct z_erofs_zstd *strm, *head = NULL;
+ void *wksp;
+
+ if (!zstd || size < sizeof(struct z_erofs_zstd_cfgs) || zstd->format) {
+ erofs_err(sb, "unsupported zstd format, size=%u", size);
+ return -EINVAL;
+ }
+
+ if (zstd->windowlog > ilog2(Z_EROFS_ZSTD_MAX_DICT_SIZE) - 10) {
+ erofs_err(sb, "unsupported zstd window log %u", zstd->windowlog);
+ return -EINVAL;
+ }
+ dict_size = 1U << (zstd->windowlog + 10);
+
+	/* serialize racing z_erofs_load_zstd_config() calls to avoid deadlock */
+ mutex_lock(&zstd_resize_mutex);
+ if (z_erofs_zstd_max_dictsize >= dict_size) {
+ mutex_unlock(&zstd_resize_mutex);
+ return 0;
+ }
+
+ /* 1. collect/isolate all streams for the following check */
+ while (z_erofs_zstd_avail_strms) {
+ struct z_erofs_zstd *n;
+
+ for (strm = z_erofs_isolate_strms(true); strm; strm = n) {
+ n = strm->next;
+ strm->next = head;
+ head = strm;
+ --z_erofs_zstd_avail_strms;
+ }
+ }
+
+ /* 2. walk each isolated stream and grow max dict_size if needed */
+ wkspsz = zstd_dstream_workspace_bound(dict_size);
+ for (strm = head; strm; strm = strm->next) {
+ wksp = kvmalloc(wkspsz, GFP_KERNEL);
+ if (!wksp)
+ break;
+ kvfree(strm->wksp);
+ strm->wksp = wksp;
+ strm->wkspsz = wkspsz;
+ }
+
+ /* 3. push back all to the global list and update max dict_size */
+ spin_lock(&z_erofs_zstd_lock);
+ DBG_BUGON(z_erofs_zstd_head);
+ z_erofs_zstd_head = head;
+ spin_unlock(&z_erofs_zstd_lock);
+ z_erofs_zstd_avail_strms = z_erofs_zstd_nstrms;
+ wake_up_all(&z_erofs_zstd_wq);
+ if (!strm)
+ z_erofs_zstd_max_dictsize = dict_size;
+ mutex_unlock(&zstd_resize_mutex);
+ return strm ? -ENOMEM : 0;
+}
+
+int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq,
+ struct page **pgpl)
+{
+ const unsigned int nrpages_out =
+ PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ const unsigned int nrpages_in =
+ PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
+ zstd_dstream *stream;
+ struct super_block *sb = rq->sb;
+ unsigned int insz, outsz, pofs;
+ struct z_erofs_zstd *strm;
+ zstd_in_buffer in_buf = { NULL, 0, 0 };
+ zstd_out_buffer out_buf = { NULL, 0, 0 };
+ u8 *kin, *kout = NULL;
+ bool bounced = false;
+ int no = -1, ni = 0, j = 0, zerr, err;
+
+ /* 1. get the exact compressed size */
+ kin = kmap_local_page(*rq->in);
+ err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
+ min_t(unsigned int, rq->inputsize,
+ sb->s_blocksize - rq->pageofs_in));
+ if (err) {
+ kunmap_local(kin);
+ return err;
+ }
+
+ /* 2. get an available ZSTD context */
+ strm = z_erofs_isolate_strms(false);
+
+ /* 3. multi-call decompress */
+ insz = rq->inputsize;
+ outsz = rq->outputsize;
+ stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz);
+ if (!stream) {
+ err = -EIO;
+ goto failed_zinit;
+ }
+
+ pofs = rq->pageofs_out;
+ in_buf.size = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in);
+ insz -= in_buf.size;
+ in_buf.src = kin + rq->pageofs_in;
+ do {
+ if (out_buf.size == out_buf.pos) {
+ if (++no >= nrpages_out || !outsz) {
+ erofs_err(sb, "insufficient space for decompressed data");
+ err = -EFSCORRUPTED;
+ break;
+ }
+
+ if (kout)
+ kunmap_local(kout);
+ out_buf.size = min_t(u32, outsz, PAGE_SIZE - pofs);
+ outsz -= out_buf.size;
+ if (!rq->out[no]) {
+ rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+ if (!rq->out[no]) {
+ kout = NULL;
+ err = -ENOMEM;
+ break;
+ }
+ set_page_private(rq->out[no],
+ Z_EROFS_SHORTLIVED_PAGE);
+ }
+ kout = kmap_local_page(rq->out[no]);
+ out_buf.dst = kout + pofs;
+ out_buf.pos = 0;
+ pofs = 0;
+ }
+
+ if (in_buf.size == in_buf.pos && insz) {
+ if (++ni >= nrpages_in) {
+ erofs_err(sb, "invalid compressed data");
+ err = -EFSCORRUPTED;
+ break;
+ }
+
+		if (kout) /* kmap_local mappings must be unmapped in reverse order */
+ kunmap_local(kout);
+ kunmap_local(kin);
+ in_buf.size = min_t(u32, insz, PAGE_SIZE);
+ insz -= in_buf.size;
+ kin = kmap_local_page(rq->in[ni]);
+ in_buf.src = kin;
+ in_buf.pos = 0;
+ bounced = false;
+ if (kout) {
+ j = (u8 *)out_buf.dst - kout;
+ kout = kmap_local_page(rq->out[no]);
+ out_buf.dst = kout + j;
+ }
+ }
+
+ /*
+		 * Handle overlapping: use the bounce buffer if the compressed
+		 * data is currently being processed; otherwise use short-lived
+		 * pages from the on-stack pagepool, which are shared within the
+		 * same request so that not _all_ inplace I/O pages need doubling.
+ */
+ if (!bounced && rq->out[no] == rq->in[ni]) {
+ memcpy(strm->bounce, in_buf.src, in_buf.size);
+ in_buf.src = strm->bounce;
+ bounced = true;
+ }
+
+ for (j = ni + 1; j < nrpages_in; ++j) {
+ struct page *tmppage;
+
+ if (rq->out[no] != rq->in[j])
+ continue;
+ tmppage = erofs_allocpage(pgpl, rq->gfp);
+ if (!tmppage) {
+ err = -ENOMEM;
+ goto failed;
+ }
+ set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
+ copy_highpage(tmppage, rq->in[j]);
+ rq->in[j] = tmppage;
+ }
+ zerr = zstd_decompress_stream(stream, &out_buf, &in_buf);
+ if (zstd_is_error(zerr) || (!zerr && outsz)) {
+ erofs_err(sb, "failed to decompress in[%u] out[%u]: %s",
+ rq->inputsize, rq->outputsize,
+ zerr ? zstd_get_error_name(zerr) : "unexpected end of stream");
+ err = -EFSCORRUPTED;
+ break;
+ }
+ } while (outsz || out_buf.pos < out_buf.size);
+failed:
+ if (kout)
+ kunmap_local(kout);
+failed_zinit:
+ kunmap_local(kin);
+ /* 4. push back ZSTD stream context to the global list */
+ spin_lock(&z_erofs_zstd_lock);
+ strm->next = z_erofs_zstd_head;
+ z_erofs_zstd_head = strm;
+ spin_unlock(&z_erofs_zstd_lock);
+ wake_up(&z_erofs_zstd_wq);
+ return err;
+}
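
The bounce handling in the loop above generalizes to any streaming decompressor that supports inplace I/O: when the page currently feeding the decoder is also an output page, its compressed bytes must be copied aside before they are overwritten. A minimal sketch of that idea, detached from the erofs structures (names are illustrative):

	/* return a safe input pointer; 'bounce' must hold at least in_len bytes */
	static const void *maybe_bounce(struct page *in_pg, struct page *out_pg,
					const void *in, size_t in_len, void *bounce)
	{
		if (in_pg != out_pg)
			return in;		/* no aliasing: decode directly */
		memcpy(bounce, in, in_len);	/* preserve bytes before overwrite */
		return bounce;
	}
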
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
index a03ec70ba6f2..6c0c270c42e1 100644
--- a/fs/erofs/erofs_fs.h
+++ b/fs/erofs/erofs_fs.h
@@ -296,6 +296,7 @@ enum {
Z_EROFS_COMPRESSION_LZ4 = 0,
Z_EROFS_COMPRESSION_LZMA = 1,
Z_EROFS_COMPRESSION_DEFLATE = 2,
+ Z_EROFS_COMPRESSION_ZSTD = 3,
Z_EROFS_COMPRESSION_MAX
};
#define Z_EROFS_ALL_COMPR_ALGS ((1 << Z_EROFS_COMPRESSION_MAX) - 1)
@@ -322,6 +323,15 @@ struct z_erofs_deflate_cfgs {
u8 reserved[5];
} __packed;
+/* 6 bytes (+ length field = 8 bytes) */
+struct z_erofs_zstd_cfgs {
+ u8 format;
+ u8 windowlog; /* windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN(10) */
+ u8 reserved[4];
+} __packed;
+
+#define Z_EROFS_ZSTD_MAX_DICT_SIZE Z_EROFS_PCLUSTER_MAX_SIZE
+
/*
* bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
* e.g. for 4k logical cluster size, 4B if compacted 2B is off;
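
On disk, windowlog is stored relative to ZSTD_WINDOWLOG_ABSOLUTEMIN (10), so the loader in decompressor_zstd.c reconstructs the dictionary size as dict_size = 1 << (windowlog + 10): a stored value of 0 means a 1 KiB window and 10 means 1 MiB. The bound check rejects any windowlog above ilog2(Z_EROFS_ZSTD_MAX_DICT_SIZE) - 10, keeping dict_size within Z_EROFS_PCLUSTER_MAX_SIZE.
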
@@ -396,8 +406,7 @@ enum {
Z_EROFS_LCLUSTER_TYPE_MAX
};
-#define Z_EROFS_LI_LCLUSTER_TYPE_BITS 2
-#define Z_EROFS_LI_LCLUSTER_TYPE_BIT 0
+#define Z_EROFS_LI_LCLUSTER_TYPE_MASK (Z_EROFS_LCLUSTER_TYPE_MAX - 1)
/* (noncompact only, HEAD) This pcluster refers to partial decompressed data */
#define Z_EROFS_LI_PARTIAL_REF (1 << 15)
@@ -451,8 +460,6 @@ static inline void erofs_check_ondisk_layout_definitions(void)
sizeof(struct z_erofs_lcluster_index));
BUILD_BUG_ON(sizeof(struct erofs_deviceslot) != 128);
- BUILD_BUG_ON(BIT(Z_EROFS_LI_LCLUSTER_TYPE_BITS) <
- Z_EROFS_LCLUSTER_TYPE_MAX - 1);
/* exclude old compiler versions like gcc 7.5.0 */
BUILD_BUG_ON(__builtin_constant_p(fmh) ?
fmh != cpu_to_le64(1ULL << 63) : 0);
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 8aff1a724805..62da538d91cb 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -151,7 +151,7 @@ static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
if (WARN_ON(len == 0))
source = NETFS_INVALID_READ;
if (source != NETFS_READ_FROM_CACHE) {
- erofs_err(NULL, "prepare_read failed (source %d)", source);
+ erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source);
return -EIO;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 39c67119f43b..21def866a482 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -84,13 +84,6 @@ struct erofs_dev_context {
bool flatdev;
};
-struct erofs_fs_context {
- struct erofs_mount_opts opt;
- struct erofs_dev_context *devs;
- char *fsid;
- char *domain_id;
-};
-
/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
/* # of pages needed for EROFS lz4 rolling decompression */
@@ -445,7 +438,11 @@ void erofs_unregister_sysfs(struct super_block *sb);
int __init erofs_init_sysfs(void);
void erofs_exit_sysfs(void);
-struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
+struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv);
+static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
+{
+ return __erofs_allocpage(pagepool, gfp, false);
+}
static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
{
set_page_private(page, (unsigned long)*pagepool);
@@ -470,11 +467,11 @@ int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi,
struct erofs_workgroup *egrp);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int flags);
-void *erofs_get_pcpubuf(unsigned int requiredpages);
-void erofs_put_pcpubuf(void *ptr);
-int erofs_pcpubuf_growsize(unsigned int nrpages);
-void __init erofs_pcpubuf_init(void);
-void erofs_pcpubuf_exit(void);
+void *z_erofs_get_gbuf(unsigned int requiredpages);
+void z_erofs_put_gbuf(void *ptr);
+int z_erofs_gbuf_growsize(unsigned int nrpages);
+int __init z_erofs_gbuf_init(void);
+void z_erofs_gbuf_exit(void);
int erofs_init_managed_cache(struct super_block *sb);
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
#else
@@ -484,8 +481,8 @@ static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
-static inline void erofs_pcpubuf_init(void) {}
-static inline void erofs_pcpubuf_exit(void) {}
+static inline int z_erofs_gbuf_init(void) { return 0; }
+static inline void z_erofs_gbuf_exit(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */
@@ -505,6 +502,14 @@ static inline int z_erofs_deflate_init(void) { return 0; }
static inline int z_erofs_deflate_exit(void) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */
+#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
+int __init z_erofs_zstd_init(void);
+void z_erofs_zstd_exit(void);
+#else
+static inline int z_erofs_zstd_init(void) { return 0; }
+static inline int z_erofs_zstd_exit(void) { return 0; }
+#endif /* !CONFIG_EROFS_FS_ZIP_ZSTD */
+
#ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb);
diff --git a/fs/erofs/pcpubuf.c b/fs/erofs/pcpubuf.c
deleted file mode 100644
index c7a4b1d77069..000000000000
--- a/fs/erofs/pcpubuf.c
+++ /dev/null
@@ -1,148 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) Gao Xiang <xiang@kernel.org>
- *
- * For low-latency decompression algorithms (e.g. lz4), reserve consecutive
- * per-CPU virtual memory (in pages) in advance to store such inplace I/O
- * data if inplace decompression is failed (due to unmet inplace margin for
- * example).
- */
-#include "internal.h"
-
-struct erofs_pcpubuf {
- raw_spinlock_t lock;
- void *ptr;
- struct page **pages;
- unsigned int nrpages;
-};
-
-static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb);
-
-void *erofs_get_pcpubuf(unsigned int requiredpages)
- __acquires(pcb->lock)
-{
- struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb);
-
- raw_spin_lock(&pcb->lock);
- /* check if the per-CPU buffer is too small */
- if (requiredpages > pcb->nrpages) {
- raw_spin_unlock(&pcb->lock);
- put_cpu_var(erofs_pcb);
- /* (for sparse checker) pretend pcb->lock is still taken */
- __acquire(pcb->lock);
- return NULL;
- }
- return pcb->ptr;
-}
-
-void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock)
-{
- struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id());
-
- DBG_BUGON(pcb->ptr != ptr);
- raw_spin_unlock(&pcb->lock);
- put_cpu_var(erofs_pcb);
-}
-
-/* the next step: support per-CPU page buffers hotplug */
-int erofs_pcpubuf_growsize(unsigned int nrpages)
-{
- static DEFINE_MUTEX(pcb_resize_mutex);
- static unsigned int pcb_nrpages;
- struct page *pagepool = NULL;
- int delta, cpu, ret, i;
-
- mutex_lock(&pcb_resize_mutex);
- delta = nrpages - pcb_nrpages;
- ret = 0;
- /* avoid shrinking pcpubuf, since no idea how many fses rely on */
- if (delta <= 0)
- goto out;
-
- for_each_possible_cpu(cpu) {
- struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
- struct page **pages, **oldpages;
- void *ptr, *old_ptr;
-
- pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- ret = -ENOMEM;
- break;
- }
-
- for (i = 0; i < nrpages; ++i) {
- pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
- if (!pages[i]) {
- ret = -ENOMEM;
- oldpages = pages;
- goto free_pagearray;
- }
- }
- ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL);
- if (!ptr) {
- ret = -ENOMEM;
- oldpages = pages;
- goto free_pagearray;
- }
- raw_spin_lock(&pcb->lock);
- old_ptr = pcb->ptr;
- pcb->ptr = ptr;
- oldpages = pcb->pages;
- pcb->pages = pages;
- i = pcb->nrpages;
- pcb->nrpages = nrpages;
- raw_spin_unlock(&pcb->lock);
-
- if (!oldpages) {
- DBG_BUGON(old_ptr);
- continue;
- }
-
- if (old_ptr)
- vunmap(old_ptr);
-free_pagearray:
- while (i)
- erofs_pagepool_add(&pagepool, oldpages[--i]);
- kfree(oldpages);
- if (ret)
- break;
- }
- pcb_nrpages = nrpages;
- erofs_release_pages(&pagepool);
-out:
- mutex_unlock(&pcb_resize_mutex);
- return ret;
-}
-
-void __init erofs_pcpubuf_init(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
-
- raw_spin_lock_init(&pcb->lock);
- }
-}
-
-void erofs_pcpubuf_exit(void)
-{
- int cpu, i;
-
- for_each_possible_cpu(cpu) {
- struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu);
-
- if (pcb->ptr) {
- vunmap(pcb->ptr);
- pcb->ptr = NULL;
- }
- if (!pcb->pages)
- continue;
-
- for (i = 0; i < pcb->nrpages; ++i)
- if (pcb->pages[i])
- put_page(pcb->pages[i]);
- kfree(pcb->pages);
- pcb->pages = NULL;
- }
-}
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 69308fd73e4a..044c79229a78 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -370,18 +370,18 @@ out:
return ret;
}
-static void erofs_default_options(struct erofs_fs_context *ctx)
+static void erofs_default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_ZIP
- ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
- ctx->opt.max_sync_decompress_pages = 3;
- ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
+ sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ sbi->opt.max_sync_decompress_pages = 3;
+ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
- set_opt(&ctx->opt, XATTR_USER);
+ set_opt(&sbi->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- set_opt(&ctx->opt, POSIX_ACL);
+ set_opt(&sbi->opt, POSIX_ACL);
#endif
}
@@ -426,17 +426,16 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
switch (mode) {
case EROFS_MOUNT_DAX_ALWAYS:
- warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- set_opt(&ctx->opt, DAX_ALWAYS);
- clear_opt(&ctx->opt, DAX_NEVER);
+ set_opt(&sbi->opt, DAX_ALWAYS);
+ clear_opt(&sbi->opt, DAX_NEVER);
return true;
case EROFS_MOUNT_DAX_NEVER:
- set_opt(&ctx->opt, DAX_NEVER);
- clear_opt(&ctx->opt, DAX_ALWAYS);
+ set_opt(&sbi->opt, DAX_NEVER);
+ clear_opt(&sbi->opt, DAX_ALWAYS);
return true;
default:
DBG_BUGON(1);
@@ -451,7 +450,7 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
static int erofs_fc_parse_param(struct fs_context *fc,
struct fs_parameter *param)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
struct fs_parse_result result;
struct erofs_device_info *dif;
int opt, ret;
@@ -464,9 +463,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
if (result.boolean)
- set_opt(&ctx->opt, XATTR_USER);
+ set_opt(&sbi->opt, XATTR_USER);
else
- clear_opt(&ctx->opt, XATTR_USER);
+ clear_opt(&sbi->opt, XATTR_USER);
#else
errorfc(fc, "{,no}user_xattr options not supported");
#endif
@@ -474,16 +473,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
if (result.boolean)
- set_opt(&ctx->opt, POSIX_ACL);
+ set_opt(&sbi->opt, POSIX_ACL);
else
- clear_opt(&ctx->opt, POSIX_ACL);
+ clear_opt(&sbi->opt, POSIX_ACL);
#else
errorfc(fc, "{,no}acl options not supported");
#endif
break;
case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
- ctx->opt.cache_strategy = result.uint_32;
+ sbi->opt.cache_strategy = result.uint_32;
#else
errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
@@ -505,27 +504,27 @@ static int erofs_fc_parse_param(struct fs_context *fc,
kfree(dif);
return -ENOMEM;
}
- down_write(&ctx->devs->rwsem);
- ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
- up_write(&ctx->devs->rwsem);
+ down_write(&sbi->devs->rwsem);
+ ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
+ up_write(&sbi->devs->rwsem);
if (ret < 0) {
kfree(dif->path);
kfree(dif);
return ret;
}
- ++ctx->devs->extra_devices;
+ ++sbi->devs->extra_devices;
break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
case Opt_fsid:
- kfree(ctx->fsid);
- ctx->fsid = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->fsid)
+ kfree(sbi->fsid);
+ sbi->fsid = kstrdup(param->string, GFP_KERNEL);
+ if (!sbi->fsid)
return -ENOMEM;
break;
case Opt_domain_id:
- kfree(ctx->domain_id);
- ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->domain_id)
+ kfree(sbi->domain_id);
+ sbi->domain_id = kstrdup(param->string, GFP_KERNEL);
+ if (!sbi->domain_id)
return -ENOMEM;
break;
#else
@@ -582,8 +581,7 @@ static const struct export_operations erofs_export_ops = {
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
- struct erofs_sb_info *sbi;
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
int err;
sb->s_magic = EROFS_SUPER_MAGIC;
@@ -591,19 +589,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_op = &erofs_sops;
- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
- if (!sbi)
- return -ENOMEM;
-
- sb->s_fs_info = sbi;
- sbi->opt = ctx->opt;
- sbi->devs = ctx->devs;
- ctx->devs = NULL;
- sbi->fsid = ctx->fsid;
- ctx->fsid = NULL;
- sbi->domain_id = ctx->domain_id;
- ctx->domain_id = NULL;
-
sbi->blkszbits = PAGE_SHIFT;
if (erofs_is_fscache_mode(sb)) {
sb->s_blocksize = PAGE_SIZE;
@@ -707,9 +692,9 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
static int erofs_fc_get_tree(struct fs_context *fc)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
- if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
return get_tree_nodev(fc, erofs_fc_fill_super);
return get_tree_bdev(fc, erofs_fc_fill_super);
@@ -719,19 +704,19 @@ static int erofs_fc_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *new_sbi = fc->s_fs_info;
DBG_BUGON(!sb_rdonly(sb));
- if (ctx->fsid || ctx->domain_id)
+ if (new_sbi->fsid || new_sbi->domain_id)
erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
- if (test_opt(&ctx->opt, POSIX_ACL))
+ if (test_opt(&new_sbi->opt, POSIX_ACL))
fc->sb_flags |= SB_POSIXACL;
else
fc->sb_flags &= ~SB_POSIXACL;
- sbi->opt = ctx->opt;
+ sbi->opt = new_sbi->opt;
fc->sb_flags |= SB_RDONLY;
return 0;
@@ -762,12 +747,15 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
static void erofs_fc_free(struct fs_context *fc)
{
- struct erofs_fs_context *ctx = fc->fs_private;
+ struct erofs_sb_info *sbi = fc->s_fs_info;
- erofs_free_dev_context(ctx->devs);
- kfree(ctx->fsid);
- kfree(ctx->domain_id);
- kfree(ctx);
+ if (!sbi)
+ return;
+
+ erofs_free_dev_context(sbi->devs);
+ kfree(sbi->fsid);
+ kfree(sbi->domain_id);
+ kfree(sbi);
}
static const struct fs_context_operations erofs_context_ops = {
@@ -779,38 +767,35 @@ static const struct fs_context_operations erofs_context_ops = {
static int erofs_init_fs_context(struct fs_context *fc)
{
- struct erofs_fs_context *ctx;
+ struct erofs_sb_info *sbi;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
return -ENOMEM;
- ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
- if (!ctx->devs) {
- kfree(ctx);
+
+ sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
+ if (!sbi->devs) {
+ kfree(sbi);
return -ENOMEM;
}
- fc->fs_private = ctx;
+ fc->s_fs_info = sbi;
- idr_init(&ctx->devs->tree);
- init_rwsem(&ctx->devs->rwsem);
- erofs_default_options(ctx);
+ idr_init(&sbi->devs->tree);
+ init_rwsem(&sbi->devs->rwsem);
+ erofs_default_options(sbi);
fc->ops = &erofs_context_ops;
return 0;
}
static void erofs_kill_sb(struct super_block *sb)
{
- struct erofs_sb_info *sbi;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
- if (erofs_is_fscache_mode(sb))
+ if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
kill_anon_super(sb);
else
kill_block_super(sb);
- sbi = EROFS_SB(sb);
- if (!sbi)
- return;
-
erofs_free_dev_context(sbi->devs);
fs_put_dax(sbi->dax_dev, NULL);
erofs_fscache_unregister_fs(sb);
@@ -874,7 +859,14 @@ static int __init erofs_module_init(void)
if (err)
goto deflate_err;
- erofs_pcpubuf_init();
+ err = z_erofs_zstd_init();
+ if (err)
+ goto zstd_err;
+
+ err = z_erofs_gbuf_init();
+ if (err)
+ goto gbuf_err;
+
err = z_erofs_init_zip_subsystem();
if (err)
goto zip_err;
@@ -894,6 +886,10 @@ fs_err:
sysfs_err:
z_erofs_exit_zip_subsystem();
zip_err:
+ z_erofs_gbuf_exit();
+gbuf_err:
+ z_erofs_zstd_exit();
+zstd_err:
z_erofs_deflate_exit();
deflate_err:
z_erofs_lzma_exit();
@@ -913,33 +909,32 @@ static void __exit erofs_module_exit(void)
erofs_exit_sysfs();
z_erofs_exit_zip_subsystem();
+ z_erofs_zstd_exit();
z_erofs_deflate_exit();
z_erofs_lzma_exit();
erofs_exit_shrinker();
kmem_cache_destroy(erofs_inode_cachep);
- erofs_pcpubuf_exit();
+ z_erofs_gbuf_exit();
}
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct erofs_sb_info *sbi = EROFS_SB(sb);
- u64 id = 0;
-
- if (!erofs_is_fscache_mode(sb))
- id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = sb->s_magic;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = sbi->total_blocks;
buf->f_bfree = buf->f_bavail = 0;
-
buf->f_files = ULLONG_MAX;
buf->f_ffree = ULLONG_MAX - sbi->inos;
-
buf->f_namelen = EROFS_NAME_LEN;
- buf->f_fsid = u64_to_fsid(id);
+ if (uuid_is_null(&sb->s_uuid))
+ buf->f_fsid = u64_to_fsid(erofs_is_fscache_mode(sb) ? 0 :
+ huge_encode_dev(sb->s_bdev->bd_dev));
+ else
+ buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
return 0;
}
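
The conversion in this file follows the standard fs_context ownership pattern: allocate the sb_info in init_fs_context(), park it in fc->s_fs_info, and let sget_fc() move it to sb->s_fs_info when the superblock is created (clearing fc->s_fs_info in the process), which is why erofs_fc_free() now checks for NULL. A condensed sketch of the lifecycle, using hypothetical example_* names and eliding parsing and error paths:

	static void example_fc_free(struct fs_context *fc)
	{
		kfree(fc->s_fs_info);	/* NULL once the superblock took ownership */
	}

	static const struct fs_context_operations example_context_ops = {
		.free	= example_fc_free,
		/* .parse_param and .get_tree elided for brevity */
	};

	static int example_init_fs_context(struct fs_context *fc)
	{
		struct example_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);

		if (!sbi)
			return -ENOMEM;
		fc->s_fs_info = sbi;	/* sget_fc() moves this to sb->s_fs_info */
		fc->ops = &example_context_ops;
		return 0;
	}
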
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index e313c936351d..0a2454d8bcc1 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -31,7 +31,7 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
vi->inode_isize + vi->xattr_isize) +
lcn * sizeof(struct z_erofs_lcluster_index);
struct z_erofs_lcluster_index *di;
- unsigned int advise, type;
+ unsigned int advise;
m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
@@ -43,10 +43,8 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
di = m->kaddr + erofs_blkoff(inode->i_sb, pos);
advise = le16_to_cpu(di->di_advise);
- type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
- ((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
- switch (type) {
- case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
+ m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
+ if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
m->clusterofs = 1 << vi->z_logical_clusterbits;
m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
@@ -60,24 +58,15 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
m->delta[0] = 1;
}
m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
- break;
- case Z_EROFS_LCLUSTER_TYPE_PLAIN:
- case Z_EROFS_LCLUSTER_TYPE_HEAD1:
- case Z_EROFS_LCLUSTER_TYPE_HEAD2:
- if (advise & Z_EROFS_LI_PARTIAL_REF)
- m->partialref = true;
+ } else {
+ m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF);
m->clusterofs = le16_to_cpu(di->di_clusterofs);
if (m->clusterofs >= 1 << vi->z_logical_clusterbits) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
m->pblk = le32_to_cpu(di->di_u.blkaddr);
- break;
- default:
- DBG_BUGON(1);
- return -EOPNOTSUPP;
}
- m->type = type;
return 0;
}
@@ -561,7 +550,8 @@ static int z_erofs_do_map_blocks(struct inode *inode,
if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
((flags & EROFS_GET_BLOCKS_READMORE) &&
(map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
- map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE) &&
+ map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
+ map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
map->m_llen >= i_blocksize(inode))) {
err = z_erofs_get_extent_decompressedlen(&m);
if (!err)
diff --git a/fs/erofs/utils.c b/fs/erofs/zutil.c
index 518bdd69c823..036024bce9f7 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/zutil.c
@@ -5,16 +5,186 @@
*/
#include "internal.h"
-struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
+struct z_erofs_gbuf {
+ spinlock_t lock;
+ void *ptr;
+ struct page **pages;
+ unsigned int nrpages;
+};
+
+static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
+static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
+ z_erofs_rsv_nrpages;
+
+module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
+module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);
+
+static atomic_long_t erofs_global_shrink_cnt; /* for all mounted instances */
+/* protected by 'erofs_sb_list_lock' */
+static unsigned int shrinker_run_no;
+
+/* protects the mounted 'erofs_sb_list' */
+static DEFINE_SPINLOCK(erofs_sb_list_lock);
+static LIST_HEAD(erofs_sb_list);
+static struct shrinker *erofs_shrinker_info;
+
+static unsigned int z_erofs_gbuf_id(void)
+{
+ return raw_smp_processor_id() % z_erofs_gbuf_count;
+}
+
+void *z_erofs_get_gbuf(unsigned int requiredpages)
+ __acquires(gbuf->lock)
+{
+ struct z_erofs_gbuf *gbuf;
+
+ gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
+ spin_lock(&gbuf->lock);
+ /* check if the buffer is too small */
+ if (requiredpages > gbuf->nrpages) {
+ spin_unlock(&gbuf->lock);
+ /* (for sparse checker) pretend gbuf->lock is still taken */
+ __acquire(gbuf->lock);
+ return NULL;
+ }
+ return gbuf->ptr;
+}
+
+void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
+{
+ struct z_erofs_gbuf *gbuf;
+
+ gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()];
+ DBG_BUGON(gbuf->ptr != ptr);
+ spin_unlock(&gbuf->lock);
+}
+
+int z_erofs_gbuf_growsize(unsigned int nrpages)
+{
+ static DEFINE_MUTEX(gbuf_resize_mutex);
+ struct page **tmp_pages = NULL;
+ struct z_erofs_gbuf *gbuf;
+ void *ptr, *old_ptr;
+ int last, i, j;
+
+ mutex_lock(&gbuf_resize_mutex);
+	/* avoid shrinking gbufs; we don't know how many fses rely on the current size */
+ if (nrpages <= z_erofs_gbuf_nrpages) {
+ mutex_unlock(&gbuf_resize_mutex);
+ return 0;
+ }
+
+ for (i = 0; i < z_erofs_gbuf_count; ++i) {
+ gbuf = &z_erofs_gbufpool[i];
+ tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
+ if (!tmp_pages)
+ goto out;
+
+ for (j = 0; j < gbuf->nrpages; ++j)
+ tmp_pages[j] = gbuf->pages[j];
+ do {
+ last = j;
+ j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
+ tmp_pages);
+ if (last == j)
+ goto out;
+ } while (j != nrpages);
+
+ ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
+ if (!ptr)
+ goto out;
+
+ spin_lock(&gbuf->lock);
+ kfree(gbuf->pages);
+ gbuf->pages = tmp_pages;
+ old_ptr = gbuf->ptr;
+ gbuf->ptr = ptr;
+ gbuf->nrpages = nrpages;
+ spin_unlock(&gbuf->lock);
+ if (old_ptr)
+ vunmap(old_ptr);
+ }
+ z_erofs_gbuf_nrpages = nrpages;
+out:
+ if (i < z_erofs_gbuf_count && tmp_pages) {
+ for (j = 0; j < nrpages; ++j)
+ if (tmp_pages[j] && tmp_pages[j] != gbuf->pages[j])
+ __free_page(tmp_pages[j]);
+ kfree(tmp_pages);
+ }
+ mutex_unlock(&gbuf_resize_mutex);
+ return i < z_erofs_gbuf_count ? -ENOMEM : 0;
+}
+
+int __init z_erofs_gbuf_init(void)
+{
+ unsigned int i, total = num_possible_cpus();
+
+ if (z_erofs_gbuf_count)
+ total = min(z_erofs_gbuf_count, total);
+ z_erofs_gbuf_count = total;
+
+ /* The last (special) global buffer is the reserved buffer */
+ total += !!z_erofs_rsv_nrpages;
+
+ z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
+ GFP_KERNEL);
+ if (!z_erofs_gbufpool)
+ return -ENOMEM;
+
+ if (z_erofs_rsv_nrpages) {
+ z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
+ z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
+ sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
+ if (!z_erofs_rsvbuf->pages) {
+ z_erofs_rsvbuf = NULL;
+ z_erofs_rsv_nrpages = 0;
+ }
+ }
+ for (i = 0; i < total; ++i)
+ spin_lock_init(&z_erofs_gbufpool[i].lock);
+ return 0;
+}
+
+void z_erofs_gbuf_exit(void)
+{
+	int i, j;
+
+ for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
+ struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];
+
+ if (gbuf->ptr) {
+ vunmap(gbuf->ptr);
+ gbuf->ptr = NULL;
+ }
+
+ if (!gbuf->pages)
+ continue;
+
+		/* use a separate index: reusing 'i' would clobber the outer loop */
+		for (j = 0; j < gbuf->nrpages; ++j)
+			if (gbuf->pages[j])
+				put_page(gbuf->pages[j]);
+ kfree(gbuf->pages);
+ gbuf->pages = NULL;
+ }
+ kfree(z_erofs_gbufpool);
+}
+
+struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
{
struct page *page = *pagepool;
if (page) {
- DBG_BUGON(page_ref_count(page) != 1);
*pagepool = (struct page *)page_private(page);
- } else {
- page = alloc_page(gfp);
+ } else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
+ spin_lock(&z_erofs_rsvbuf->lock);
+ if (z_erofs_rsvbuf->nrpages)
+ page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
+ spin_unlock(&z_erofs_rsvbuf->lock);
}
+ if (!page)
+ page = alloc_page(gfp);
+ DBG_BUGON(page && page_ref_count(page) != 1);
return page;
}
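
Both knobs introduced above are 0444 module parameters, so they are fixed at boot or module load rather than tunable at runtime; for a built-in erofs they would presumably be set on the kernel command line, e.g.:

	erofs.global_buffers=8 erofs.reserved_pages=64

(values illustrative; by default the buffer count follows the possible-CPU count and no pages are reserved).
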
@@ -24,14 +194,22 @@ void erofs_release_pages(struct page **pagepool)
struct page *page = *pagepool;
*pagepool = (struct page *)page_private(page);
+ /* try to fill reserved global pool first */
+ if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
+ z_erofs_rsv_nrpages) {
+ spin_lock(&z_erofs_rsvbuf->lock);
+ if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
+ z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
+ = page;
+ spin_unlock(&z_erofs_rsvbuf->lock);
+ continue;
+ }
+ spin_unlock(&z_erofs_rsvbuf->lock);
+ }
put_page(page);
}
}
-#ifdef CONFIG_EROFS_FS_ZIP
-/* global shrink count (for all mounted EROFS instances) */
-static atomic_long_t erofs_global_shrink_cnt;
-
static bool erofs_workgroup_get(struct erofs_workgroup *grp)
{
if (lockref_get_not_zero(&grp->lockref))
@@ -171,13 +349,6 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
return freed;
}
-/* protected by 'erofs_sb_list_lock' */
-static unsigned int shrinker_run_no;
-
-/* protects the mounted 'erofs_sb_list' */
-static DEFINE_SPINLOCK(erofs_sb_list_lock);
-static LIST_HEAD(erofs_sb_list);
-
void erofs_shrinker_register(struct super_block *sb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
@@ -264,8 +435,6 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
return freed;
}
-static struct shrinker *erofs_shrinker_info;
-
int __init erofs_init_shrinker(void)
{
erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
@@ -274,9 +443,7 @@ int __init erofs_init_shrinker(void)
erofs_shrinker_info->count_objects = erofs_shrink_count;
erofs_shrinker_info->scan_objects = erofs_shrink_scan;
-
shrinker_register(erofs_shrinker_info);
-
return 0;
}
@@ -284,4 +451,3 @@ void erofs_exit_shrinker(void)
{
shrinker_free(erofs_shrinker_info);
}
-#endif /* !CONFIG_EROFS_FS_ZIP */
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 882b89edc52a..f53ca4f7fced 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -980,6 +980,34 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
}
/*
+ * The ffd.file pointer may be in the process of being torn down due to
+ * being closed, but we may not have finished eventpoll_release() yet.
+ *
+ * Normally, even with the atomic_long_inc_not_zero, the file may have
+ * been free'd and then gotten re-allocated to something else (since
+ * files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).
+ *
+ * But for epoll, users hold the ep->mtx mutex, and as such any file in
+ * the process of being free'd will block in eventpoll_release_file()
+ * and thus the underlying file allocation will not be free'd, and the
+ * file re-use cannot happen.
+ *
+ * For the same reason we can avoid a rcu_read_lock() around the
+ * operation - 'ffd.file' cannot go away even if the refcount has
+ * reached zero (but we must still not call out to ->poll() functions
+ * etc).
+ */
+static struct file *epi_fget(const struct epitem *epi)
+{
+ struct file *file;
+
+ file = epi->ffd.file;
+ if (!atomic_long_inc_not_zero(&file->f_count))
+ file = NULL;
+ return file;
+}
+
+/*
* Differs from ep_eventpoll_poll() in that internal callers already have
* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
* is correctly annotated.
@@ -987,14 +1015,22 @@ static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int dep
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
int depth)
{
- struct file *file = epi->ffd.file;
+ struct file *file = epi_fget(epi);
__poll_t res;
+ /*
+ * We could return EPOLLERR | EPOLLHUP or something, but let's
+ * treat this more as "file doesn't exist, poll didn't happen".
+ */
+ if (!file)
+ return 0;
+
pt->_key = epi->event.events;
if (!is_file_epoll(file))
res = vfs_poll(file, pt);
else
res = __ep_eventpoll_poll(file, pt, depth);
+ fput(file);
return res & epi->event.events;
}
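
epi_fget() above is an instance of the standard conditional-get for objects whose memory may be recycled (SLAB_TYPESAFE_BY_RCU): an unconditional reference increment could resurrect a file already on its way to zero, so the get must refuse once the count has dropped. Reduced to its core (a sketch, not the epoll code):

	/* take a reference only if the object is still live */
	static struct file *try_get_file(struct file *f)
	{
		return atomic_long_inc_not_zero(&f->f_count) ? f : NULL;
	}
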
diff --git a/fs/exec.c b/fs/exec.c
index ff6f26671cfc..40073142288f 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -67,6 +67,7 @@
#include <linux/time_namespace.h>
#include <linux/user_events.h>
#include <linux/rseq.h>
+#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -268,6 +269,14 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
}
/*
+	 * Must be called with the mmap write lock held to avoid
+	 * racing with ksmd.
+ */
+ err = ksm_execve(mm);
+ if (err)
+ goto err_ksm;
+
+ /*
* Place the stack at the largest stack address the architecture
* supports. Later, we'll move this to an appropriate place. We don't
* use STACK_TOP because that can depend on attributes which aren't
@@ -288,6 +297,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
bprm->p = vma->vm_end - sizeof(void *);
return 0;
err:
+ ksm_exit(mm);
+err_ksm:
mmap_write_unlock(mm);
err_free:
bprm->vma = NULL;
@@ -895,6 +906,7 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
goto out;
}
+ bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
*sp_location = sp;
out:
@@ -1267,6 +1279,14 @@ int begin_new_exec(struct linux_binprm * bprm)
return retval;
/*
+ * This tracepoint marks the point before flushing the old exec where
+ * the current task is still unchanged, but errors are fatal (point of
+ * no return). The later "sched_process_exec" tracepoint is called after
+ * the current task has successfully switched to the new exec.
+ */
+ trace_sched_prepare_exec(current, bprm);
+
+ /*
* Ensure all future errors are fatal.
*/
bprm->point_of_no_return = true;
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index 077944d3c2c0..84572e11cc05 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -420,6 +420,7 @@ static void exfat_set_entry_type(struct exfat_dentry *ep, unsigned int type)
static void exfat_init_stream_entry(struct exfat_dentry *ep,
unsigned int start_clu, unsigned long long size)
{
+ memset(ep, 0, sizeof(*ep));
exfat_set_entry_type(ep, TYPE_STREAM);
if (size == 0)
ep->dentry.stream.flags = ALLOC_FAT_CHAIN;
@@ -457,6 +458,7 @@ void exfat_init_dir_entry(struct exfat_entry_set_cache *es,
struct exfat_dentry *ep;
ep = exfat_get_dentry_cached(es, ES_IDX_FILE);
+ memset(ep, 0, sizeof(*ep));
exfat_set_entry_type(ep, type);
exfat_set_entry_time(sbi, ts,
&ep->dentry.file.create_tz,
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index cc00f1a7a1e1..9adfc38ca7da 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -51,7 +51,7 @@ static int exfat_cont_expand(struct inode *inode, loff_t size)
clu.flags = ei->flags;
ret = exfat_alloc_cluster(inode, new_num_clusters - num_clusters,
- &clu, IS_DIRSYNC(inode));
+ &clu, inode_needs_sync(inode));
if (ret)
return ret;
@@ -77,12 +77,11 @@ out:
ei->i_size_aligned = round_up(size, sb->s_blocksize);
ei->i_size_ondisk = ei->i_size_aligned;
inode->i_blocks = round_up(size, sbi->cluster_size) >> 9;
+ mark_inode_dirty(inode);
- if (IS_DIRSYNC(inode))
+ if (IS_SYNC(inode))
return write_inode_now(inode, 1);
- mark_inode_dirty(inode);
-
return 0;
free_clu:
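
The exfat_alloc_cluster() change above matters because this path also runs for regular files: IS_DIRSYNC() reports true whenever the filesystem is mounted with dirsync (or sync), even on non-directories, while inode_needs_sync() applies dirsync semantics only to directories. The VFS helper is roughly:

	/* paraphrase of inode_needs_sync() from include/linux/fs.h */
	static inline bool inode_needs_sync(struct inode *inode)
	{
		if (IS_SYNC(inode))
			return true;
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			return true;
		return false;
	}
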
diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h
index ef4c19e5f570..0c5a79c3b5d4 100644
--- a/fs/ext4/acl.h
+++ b/fs/ext4/acl.h
@@ -68,11 +68,6 @@ extern int ext4_init_acl(handle_t *, struct inode *, struct inode *);
static inline int
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
{
- /* usually, the umask is applied by posix_acl_create(), but if
- ext4 ACL support is disabled at compile time, we need to do
- it here, because posix_acl_create() will never be called */
- inode->i_mode &= ~current_umask();
-
return 0;
}
#endif /* CONFIG_EXT4_FS_POSIX_ACL */
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8d126654019e..983dad8c07ec 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -213,11 +213,14 @@ enum criteria {
#define EXT4_MB_USE_RESERVED 0x2000
/* Do strict check for free blocks while retrying block allocation */
#define EXT4_MB_STRICT_CHECK 0x4000
-/* Large fragment size list lookup succeeded at least once for cr = 0 */
+/* Large fragment size list lookup succeeded at least once for
+ * CR_POWER2_ALIGNED */
#define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED 0x8000
-/* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */
+/* Avg fragment size rb tree lookup succeeded at least once for
+ * CR_GOAL_LEN_FAST */
#define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED 0x00010000
-/* Avg fragment size rb tree lookup succeeded at least once for cr = 1.5 */
+/* Avg fragment size rb tree lookup succeeded at least once for
+ * CR_BEST_AVAIL_LEN */
#define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED 0x00020000
struct ext4_allocation_request {
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e57054bdc5fd..e067f2dd0335 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3402,9 +3402,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
struct ext4_extent *ex, *abut_ex;
ext4_lblk_t ee_block, eof_block;
unsigned int ee_len, depth, map_len = map->m_len;
- int allocated = 0, max_zeroout = 0;
int err = 0;
int split_flag = EXT4_EXT_DATA_VALID2;
+ int allocated = 0;
+ unsigned int max_zeroout = 0;
ext_debug(inode, "logical block %llu, max_blocks %u\n",
(unsigned long long)map->m_lblk, map_len);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 54d6ff22585c..c89e434db6b7 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -844,8 +844,7 @@ static int ext4_sample_last_mounted(struct super_block *sb,
if (err)
goto out_journal;
lock_buffer(sbi->s_sbh);
- strncpy(sbi->s_es->s_last_mounted, cp,
- sizeof(sbi->s_es->s_last_mounted));
+ strtomem_pad(sbi->s_es->s_last_mounted, cp, 0);
ext4_superblock_csum_set(sb);
unlock_buffer(sbi->s_sbh);
ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
@@ -885,8 +884,7 @@ static int ext4_file_open(struct inode *inode, struct file *filp)
return ret;
}
- filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC |
- FMODE_DIO_PARALLEL_WRITE;
+ filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
return dquot_file_open(inode, filp);
}
@@ -938,7 +936,6 @@ const struct file_operations ext4_file_operations = {
.compat_ioctl = ext4_compat_ioctl,
#endif
.mmap = ext4_file_mmap,
- .mmap_supported_flags = MAP_SYNC,
.open = ext4_file_open,
.release = ext4_release_file,
.fsync = ext4_sync_file,
@@ -946,6 +943,8 @@ const struct file_operations ext4_file_operations = {
.splice_read = ext4_file_splice_read,
.splice_write = iter_file_splice_write,
.fallocate = ext4_fallocate,
+ .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
+ FOP_DIO_PARALLEL_WRITE,
};
const struct inode_operations ext4_file_inode_operations = {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 537803250ca9..4bae9ccf5fe0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1865,7 +1865,7 @@ static int mpage_submit_folio(struct mpage_da_data *mpd, struct folio *folio)
len = folio_size(folio);
if (folio_pos(folio) + len > size &&
!ext4_verity_in_progress(mpd->inode))
- len = size & ~PAGE_MASK;
+ len = size & (len - 1);
err = ext4_bio_write_folio(&mpd->io_submit, folio, len);
if (!err)
mpd->wbc->nr_to_write--;
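
The mask change above (and the matching one in the next hunk) is the large-folio generalization of the old expression: size & ~PAGE_MASK yields the offset within a PAGE_SIZE page, which is wrong once a folio spans several pages. Folio sizes are powers of two and folio_pos() is aligned to folio_size(), so the number of valid bytes in the folio containing EOF is size modulo the folio size, i.e. size & (len - 1) with len = folio_size(folio). Worked example with a 16 KiB folio at position 0x4000 and i_size = 0x5200: the correct length is 0x5200 & 0x3fff = 0x1200, whereas the old mask on a 4 KiB page size would give 0x5200 & 0xfff = 0x200.
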
@@ -2334,7 +2334,7 @@ static int mpage_journal_page_buffers(handle_t *handle,
if (folio_pos(folio) + len > size &&
!ext4_verity_in_progress(inode))
- len = size - folio_pos(folio);
+ len = size & (len - 1);
return ext4_journal_folio_buffers(handle, folio, len);
}
@@ -2887,9 +2887,6 @@ retry:
if (IS_ERR(folio))
return PTR_ERR(folio);
- /* In case writeback began while the folio was unlocked */
- folio_wait_stable(folio);
-
#ifdef CONFIG_FS_ENCRYPTION
ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
#else
@@ -3530,7 +3527,6 @@ static const struct address_space_operations ext4_aops = {
.bmap = ext4_bmap,
.invalidate_folio = ext4_invalidate_folio,
.release_folio = ext4_release_folio,
- .direct_IO = noop_direct_IO,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_folio = generic_error_remove_folio,
@@ -3547,7 +3543,6 @@ static const struct address_space_operations ext4_journalled_aops = {
.bmap = ext4_bmap,
.invalidate_folio = ext4_journalled_invalidate_folio,
.release_folio = ext4_release_folio,
- .direct_IO = noop_direct_IO,
.migrate_folio = buffer_migrate_folio_norefs,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_folio = generic_error_remove_folio,
@@ -3564,7 +3559,6 @@ static const struct address_space_operations ext4_da_aops = {
.bmap = ext4_bmap,
.invalidate_folio = ext4_invalidate_folio,
.release_folio = ext4_release_folio,
- .direct_IO = noop_direct_IO,
.migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate,
.error_remove_folio = generic_error_remove_folio,
@@ -3573,7 +3567,6 @@ static const struct address_space_operations ext4_da_aops = {
static const struct address_space_operations ext4_dax_aops = {
.writepages = ext4_dax_writepages,
- .direct_IO = noop_direct_IO,
.dirty_folio = noop_dirty_folio,
.bmap = ext4_bmap,
.swap_activate = ext4_iomap_swap_activate,
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 7160a71044c8..dab7acd49709 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1150,9 +1150,8 @@ static int ext4_ioctl_getlabel(struct ext4_sb_info *sbi, char __user *user_label
*/
BUILD_BUG_ON(EXT4_LABEL_MAX >= FSLABEL_MAX);
- memset(label, 0, sizeof(label));
lock_buffer(sbi->s_sbh);
- strncpy(label, sbi->s_es->s_volume_name, EXT4_LABEL_MAX);
+ strscpy_pad(label, sbi->s_es->s_volume_name);
unlock_buffer(sbi->s_sbh);
if (copy_to_user(user_label, label, sizeof(label)))
diff --git a/fs/ext4/mballoc-test.c b/fs/ext4/mballoc-test.c
index 044ca5238f41..bb2a223b207c 100644
--- a/fs/ext4/mballoc-test.c
+++ b/fs/ext4/mballoc-test.c
@@ -30,7 +30,31 @@ struct mbt_ext4_super_block {
#define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])
+static struct inode *mbt_alloc_inode(struct super_block *sb)
+{
+ struct ext4_inode_info *ei;
+
+ ei = kmalloc(sizeof(struct ext4_inode_info), GFP_KERNEL);
+ if (!ei)
+ return NULL;
+
+ INIT_LIST_HEAD(&ei->i_orphan);
+ init_rwsem(&ei->xattr_sem);
+ init_rwsem(&ei->i_data_sem);
+ inode_init_once(&ei->vfs_inode);
+ ext4_fc_init_inode(&ei->vfs_inode);
+
+ return &ei->vfs_inode;
+}
+
+static void mbt_free_inode(struct inode *inode)
+{
+ kfree(EXT4_I(inode));
+}
+
static const struct super_operations mbt_sops = {
+ .alloc_inode = mbt_alloc_inode,
+ .free_inode = mbt_free_inode,
};
static void mbt_kill_sb(struct super_block *sb)
@@ -859,6 +883,56 @@ static void test_mb_free_blocks(struct kunit *test)
ext4_mb_unload_buddy(&e4b);
}
+#define COUNT_FOR_ESTIMATE 100000
+static void test_mb_mark_used_cost(struct kunit *test)
+{
+ struct ext4_buddy e4b;
+ struct super_block *sb = (struct super_block *)test->priv;
+ struct ext4_free_extent ex;
+ int ret;
+ struct test_range ranges[TEST_RANGE_COUNT];
+ int i, j;
+ unsigned long start, end, all = 0;
+
+ /* buddy cache assumes that each page contains at least one block */
+ if (sb->s_blocksize > PAGE_SIZE)
+ kunit_skip(test, "blocksize exceeds pagesize");
+
+ ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ex.fe_group = TEST_GOAL_GROUP;
+ for (j = 0; j < COUNT_FOR_ESTIMATE; j++) {
+ mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
+ start = jiffies;
+ for (i = 0; i < TEST_RANGE_COUNT; i++) {
+ if (ranges[i].len == 0)
+ continue;
+
+ ex.fe_start = ranges[i].start;
+ ex.fe_len = ranges[i].len;
+ ext4_lock_group(sb, TEST_GOAL_GROUP);
+ mb_mark_used(&e4b, &ex);
+ ext4_unlock_group(sb, TEST_GOAL_GROUP);
+ }
+ end = jiffies;
+ all += (end - start);
+
+ for (i = 0; i < TEST_RANGE_COUNT; i++) {
+ if (ranges[i].len == 0)
+ continue;
+
+ ext4_lock_group(sb, TEST_GOAL_GROUP);
+ mb_free_blocks(NULL, &e4b, ranges[i].start,
+ ranges[i].len);
+ ext4_unlock_group(sb, TEST_GOAL_GROUP);
+ }
+ }
+
+ kunit_info(test, "cost %lu jiffies\n", all);
+ ext4_mb_unload_buddy(&e4b);
+}
+
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
{
.blocksize_bits = 10,
@@ -901,6 +975,8 @@ static struct kunit_case mbt_test_cases[] = {
KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
+ KUNIT_CASE_PARAM_ATTR(test_mb_mark_used_cost, mbt_layouts_gen_params,
+ { .speed = KUNIT_SPEED_SLOW }),
{}
};
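
Marking the cost estimate with { .speed = KUNIT_SPEED_SLOW } lets default test runs filter it out. For a non-parameterized case the same attribute can be expressed with the KUNIT_CASE_SLOW() shorthand; a sketch with hypothetical test names:

    static struct kunit_case demo_test_cases[] = {
            KUNIT_CASE(test_fast_path),            /* default speed */
            KUNIT_CASE_SLOW(test_exhaustive_scan), /* attr.speed = slow */
            {}
    };

Slow cases can then be excluded at run time via the kunit.py attribute filter (something like --filter "speed>slow"; see Documentation/dev-tools/kunit for the exact syntax).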
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 12b3f196010b..9dda9cd68ab2 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -831,6 +831,8 @@ static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
return 0;
if (order == MB_NUM_ORDERS(sb))
order--;
+ if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
+ order = MB_NUM_ORDERS(sb) - 1;
return order;
}
@@ -1008,6 +1010,8 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
* goal length.
*/
order = fls(ac->ac_g_ex.fe_len) - 1;
+ if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
+ order = MB_NUM_ORDERS(ac->ac_sb);
min_order = order - sbi->s_mb_best_avail_max_trim_order;
if (min_order < 0)
min_order = 0;
@@ -1076,23 +1080,11 @@ static inline int should_optimize_scan(struct ext4_allocation_context *ac)
}
/*
- * Return next linear group for allocation. If linear traversal should not be
- * performed, this function just returns the same group
+ * Return next linear group for allocation.
*/
static ext4_group_t
-next_linear_group(struct ext4_allocation_context *ac, ext4_group_t group,
- ext4_group_t ngroups)
+next_linear_group(ext4_group_t group, ext4_group_t ngroups)
{
- if (!should_optimize_scan(ac))
- goto inc_and_return;
-
- if (ac->ac_groups_linear_remaining) {
- ac->ac_groups_linear_remaining--;
- goto inc_and_return;
- }
-
- return group;
-inc_and_return:
/*
* Artificially restricted ngroups for non-extent
* files makes group > ngroups possible on first loop.
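
With the linear-scan bookkeeping hoisted into the caller (see the ext4_mb_choose_next_group() hunk below), the body of next_linear_group() reduces to a wrap-around increment. The resulting helper is, in essence:

    static ext4_group_t
    next_linear_group(ext4_group_t group, ext4_group_t ngroups)
    {
            /* Wrap back to group 0 after the last group. */
            return group + 1 >= ngroups ? 0 : group + 1;
    }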
@@ -1118,8 +1110,19 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
{
*new_cr = ac->ac_criteria;
- if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
- *group = next_linear_group(ac, *group, ngroups);
+ if (!should_optimize_scan(ac)) {
+ *group = next_linear_group(*group, ngroups);
+ return;
+ }
+
+ /*
+ * Optimized scanning can return non-adjacent groups, which can cause
+ * seek overhead on rotational disks, so try a few linear groups before
+ * switching to the optimized scan.
+ */
+ if (ac->ac_groups_linear_remaining) {
+ *group = next_linear_group(*group, ngroups);
+ ac->ac_groups_linear_remaining--;
return;
}
@@ -1131,8 +1134,9 @@ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
} else {
/*
- * TODO: For CR=2, we can arrange groups in an rb tree sorted by
- * bb_free. But until that happens, we should never come here.
+ * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an
+ * rb tree sorted by bb_free. But until that happens, we should
+ * never come here.
*/
WARN_ON(1);
}
@@ -1270,7 +1274,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b)
* for this page; do not hold this lock when calling this routine!
*/
-static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
+static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp)
{
ext4_group_t ngroups;
unsigned int blocksize;
@@ -1288,13 +1292,13 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
char *bitmap;
struct ext4_group_info *grinfo;
- inode = page->mapping->host;
+ inode = folio->mapping->host;
sb = inode->i_sb;
ngroups = ext4_get_groups_count(sb);
blocksize = i_blocksize(inode);
blocks_per_page = PAGE_SIZE / blocksize;
- mb_debug(sb, "init page %lu\n", page->index);
+ mb_debug(sb, "init folio %lu\n", folio->index);
groups_per_page = blocks_per_page >> 1;
if (groups_per_page == 0)
@@ -1309,9 +1313,9 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
} else
bh = &bhs;
- first_group = page->index * blocks_per_page / 2;
+ first_group = folio->index * blocks_per_page / 2;
- /* read all groups the page covers into the cache */
+ /* read all groups the folio covers into the cache */
for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
if (group >= ngroups)
break;
@@ -1322,10 +1326,11 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
/*
* If page is uptodate then we came here after online resize
* which added some new uninitialized group info structs, so
- * we must skip all initialized uptodate buddies on the page,
+ * we must skip all initialized uptodate buddies on the folio,
* which may be currently in use by an allocating task.
*/
- if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
+ if (folio_test_uptodate(folio) &&
+ !EXT4_MB_GRP_NEED_INIT(grinfo)) {
bh[i] = NULL;
continue;
}
@@ -1349,7 +1354,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
err = err2;
}
- first_block = page->index * blocks_per_page;
+ first_block = folio->index * blocks_per_page;
for (i = 0; i < blocks_per_page; i++) {
group = (first_block + i) >> 1;
if (group >= ngroups)
@@ -1370,7 +1375,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
* above
*
*/
- data = page_address(page) + (i * blocksize);
+ data = folio_address(folio) + (i * blocksize);
bitmap = bh[group - first_group]->b_data;
/*
@@ -1385,8 +1390,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
if ((first_block + i) & 1) {
/* this is block of buddy */
BUG_ON(incore == NULL);
- mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
- group, page->index, i * blocksize);
+ mb_debug(sb, "put buddy for group %u in folio %lu/%x\n",
+ group, folio->index, i * blocksize);
trace_ext4_mb_buddy_bitmap_load(sb, group);
grinfo->bb_fragments = 0;
memset(grinfo->bb_counters, 0,
@@ -1404,8 +1409,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
} else {
/* this is block of bitmap */
BUG_ON(incore != NULL);
- mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
- group, page->index, i * blocksize);
+ mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n",
+ group, folio->index, i * blocksize);
trace_ext4_mb_bitmap_load(sb, group);
/* see comments in ext4_mb_put_pa() */
@@ -1423,7 +1428,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
incore = data;
}
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
out:
if (bh) {
@@ -1439,7 +1444,7 @@ out:
* Lock the buddy and bitmap pages. This makes sure other parallel init_group
* on the same buddy page doesn't happen while holding the buddy page lock.
* Return the locked buddy and bitmap pages in the e4b struct. If buddy and bitmap
- * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
+ * are on the same page e4b->bd_buddy_folio is NULL and return value is 0.
*/
static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
@@ -1447,10 +1452,10 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
int block, pnum, poff;
int blocks_per_page;
- struct page *page;
+ struct folio *folio;
- e4b->bd_buddy_page = NULL;
- e4b->bd_bitmap_page = NULL;
+ e4b->bd_buddy_folio = NULL;
+ e4b->bd_bitmap_folio = NULL;
blocks_per_page = PAGE_SIZE / sb->s_blocksize;
/*
@@ -1461,12 +1466,13 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
block = group * 2;
pnum = block / blocks_per_page;
poff = block % blocks_per_page;
- page = find_or_create_page(inode->i_mapping, pnum, gfp);
- if (!page)
- return -ENOMEM;
- BUG_ON(page->mapping != inode->i_mapping);
- e4b->bd_bitmap_page = page;
- e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ BUG_ON(folio->mapping != inode->i_mapping);
+ e4b->bd_bitmap_folio = folio;
+ e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
if (blocks_per_page >= 2) {
/* buddy and bitmap are on the same page */
@@ -1474,23 +1480,24 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
}
/* blocks_per_page == 1, hence we need another page for the buddy */
- page = find_or_create_page(inode->i_mapping, block + 1, gfp);
- if (!page)
- return -ENOMEM;
- BUG_ON(page->mapping != inode->i_mapping);
- e4b->bd_buddy_page = page;
+ folio = __filemap_get_folio(inode->i_mapping, block + 1,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+ BUG_ON(folio->mapping != inode->i_mapping);
+ e4b->bd_buddy_folio = folio;
return 0;
}
static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
{
- if (e4b->bd_bitmap_page) {
- unlock_page(e4b->bd_bitmap_page);
- put_page(e4b->bd_bitmap_page);
+ if (e4b->bd_bitmap_folio) {
+ folio_unlock(e4b->bd_bitmap_folio);
+ folio_put(e4b->bd_bitmap_folio);
}
- if (e4b->bd_buddy_page) {
- unlock_page(e4b->bd_buddy_page);
- put_page(e4b->bd_buddy_page);
+ if (e4b->bd_buddy_folio) {
+ folio_unlock(e4b->bd_buddy_folio);
+ folio_put(e4b->bd_buddy_folio);
}
}
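
A recurring pattern in this conversion: find_or_create_page() signalled failure with NULL, while __filemap_get_folio() returns an ERR_PTR (for example -ENOMEM, or -ENOENT when called without FGP_CREAT and nothing is cached), so every converted call site swaps NULL tests for IS_ERR()/PTR_ERR(). The shape of the new lookup, shown in isolation:

    folio = __filemap_get_folio(mapping, index,
                                FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
    if (IS_ERR(folio))
            return PTR_ERR(folio); /* was: if (!page) return -ENOMEM; */
    /* ... folio_address(folio) ..., then folio_unlock() + folio_put() */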
@@ -1505,7 +1512,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
struct ext4_group_info *this_grp;
struct ext4_buddy e4b;
- struct page *page;
+ struct folio *folio;
int ret = 0;
might_sleep();
@@ -1532,16 +1539,16 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
goto err;
}
- page = e4b.bd_bitmap_page;
- ret = ext4_mb_init_cache(page, NULL, gfp);
+ folio = e4b.bd_bitmap_folio;
+ ret = ext4_mb_init_cache(folio, NULL, gfp);
if (ret)
goto err;
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
goto err;
}
- if (e4b.bd_buddy_page == NULL) {
+ if (e4b.bd_buddy_folio == NULL) {
/*
* If both the bitmap and buddy are in
* the same page we don't need to force
@@ -1551,11 +1558,11 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
goto err;
}
/* init buddy cache */
- page = e4b.bd_buddy_page;
- ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
+ folio = e4b.bd_buddy_folio;
+ ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp);
if (ret)
goto err;
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
goto err;
}
@@ -1577,7 +1584,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
int block;
int pnum;
int poff;
- struct page *page;
+ struct folio *folio;
int ret;
struct ext4_group_info *grp;
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -1595,8 +1602,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
e4b->bd_info = grp;
e4b->bd_sb = sb;
e4b->bd_group = group;
- e4b->bd_buddy_page = NULL;
- e4b->bd_bitmap_page = NULL;
+ e4b->bd_buddy_folio = NULL;
+ e4b->bd_bitmap_folio = NULL;
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
/*
@@ -1617,102 +1624,103 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
pnum = block / blocks_per_page;
poff = block % blocks_per_page;
- /* we could use find_or_create_page(), but it locks page
- * what we'd like to avoid in fast path ... */
- page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
- if (page == NULL || !PageUptodate(page)) {
- if (page)
+ /* Avoid locking the folio in the fast path ... */
+ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+ if (!IS_ERR(folio))
/*
- * drop the page reference and try
- * to get the page with lock. If we
+ * drop the folio reference and try
+ * to get the folio with lock. If we
* are not uptodate that implies
- * somebody just created the page but
- * is yet to initialize the same. So
+ * somebody just created the folio but
+ * is yet to initialize it. So
* wait for it to initialize.
*/
- put_page(page);
- page = find_or_create_page(inode->i_mapping, pnum, gfp);
- if (page) {
- if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
- "ext4: bitmap's paging->mapping != inode->i_mapping\n")) {
+ folio_put(folio);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (!IS_ERR(folio)) {
+ if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
+ "ext4: bitmap's mapping != inode->i_mapping\n")) {
/* should never happen */
- unlock_page(page);
+ folio_unlock(folio);
ret = -EINVAL;
goto err;
}
- if (!PageUptodate(page)) {
- ret = ext4_mb_init_cache(page, NULL, gfp);
+ if (!folio_test_uptodate(folio)) {
+ ret = ext4_mb_init_cache(folio, NULL, gfp);
if (ret) {
- unlock_page(page);
+ folio_unlock(folio);
goto err;
}
- mb_cmp_bitmaps(e4b, page_address(page) +
+ mb_cmp_bitmaps(e4b, folio_address(folio) +
(poff * sb->s_blocksize));
}
- unlock_page(page);
+ folio_unlock(folio);
}
}
- if (page == NULL) {
- ret = -ENOMEM;
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
goto err;
}
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
goto err;
}
- /* Pages marked accessed already */
- e4b->bd_bitmap_page = page;
- e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
+ /* Folios marked accessed already */
+ e4b->bd_bitmap_folio = folio;
+ e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize);
block++;
pnum = block / blocks_per_page;
poff = block % blocks_per_page;
- page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
- if (page == NULL || !PageUptodate(page)) {
- if (page)
- put_page(page);
- page = find_or_create_page(inode->i_mapping, pnum, gfp);
- if (page) {
- if (WARN_RATELIMIT(page->mapping != inode->i_mapping,
- "ext4: buddy bitmap's page->mapping != inode->i_mapping\n")) {
+ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0);
+ if (IS_ERR(folio) || !folio_test_uptodate(folio)) {
+ if (!IS_ERR(folio))
+ folio_put(folio);
+ folio = __filemap_get_folio(inode->i_mapping, pnum,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
+ if (!IS_ERR(folio)) {
+ if (WARN_RATELIMIT(folio->mapping != inode->i_mapping,
+ "ext4: buddy bitmap's mapping != inode->i_mapping\n")) {
/* should never happen */
- unlock_page(page);
+ folio_unlock(folio);
ret = -EINVAL;
goto err;
}
- if (!PageUptodate(page)) {
- ret = ext4_mb_init_cache(page, e4b->bd_bitmap,
+ if (!folio_test_uptodate(folio)) {
+ ret = ext4_mb_init_cache(folio, e4b->bd_bitmap,
gfp);
if (ret) {
- unlock_page(page);
+ folio_unlock(folio);
goto err;
}
}
- unlock_page(page);
+ folio_unlock(folio);
}
}
- if (page == NULL) {
- ret = -ENOMEM;
+ if (IS_ERR(folio)) {
+ ret = PTR_ERR(folio);
goto err;
}
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
ret = -EIO;
goto err;
}
- /* Pages marked accessed already */
- e4b->bd_buddy_page = page;
- e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
+ /* Folios marked accessed already */
+ e4b->bd_buddy_folio = folio;
+ e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize);
return 0;
err:
- if (page)
- put_page(page);
- if (e4b->bd_bitmap_page)
- put_page(e4b->bd_bitmap_page);
+ if (!IS_ERR_OR_NULL(folio))
+ folio_put(folio);
+ if (e4b->bd_bitmap_folio)
+ folio_put(e4b->bd_bitmap_folio);
e4b->bd_buddy = NULL;
e4b->bd_bitmap = NULL;
@@ -1727,10 +1735,10 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
- if (e4b->bd_bitmap_page)
- put_page(e4b->bd_bitmap_page);
- if (e4b->bd_buddy_page)
- put_page(e4b->bd_buddy_page);
+ if (e4b->bd_bitmap_folio)
+ folio_put(e4b->bd_bitmap_folio);
+ if (e4b->bd_buddy_folio)
+ folio_put(e4b->bd_buddy_folio);
}
@@ -2040,13 +2048,12 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
int ord;
int mlen = 0;
int max = 0;
- int cur;
int start = ex->fe_start;
int len = ex->fe_len;
unsigned ret = 0;
int len0 = len;
void *buddy;
- bool split = false;
+ int ord_start, ord_end;
BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
BUG_ON(e4b->bd_group != ex->fe_group);
@@ -2071,16 +2078,12 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
/* let's maintain buddy itself */
while (len) {
- if (!split)
- ord = mb_find_order_for_block(e4b, start);
+ ord = mb_find_order_for_block(e4b, start);
if (((start >> ord) << ord) == start && len >= (1 << ord)) {
/* the whole chunk may be allocated at once! */
mlen = 1 << ord;
- if (!split)
- buddy = mb_find_buddy(e4b, ord, &max);
- else
- split = false;
+ buddy = mb_find_buddy(e4b, ord, &max);
BUG_ON((start >> ord) >= max);
mb_set_bit(start >> ord, buddy);
e4b->bd_info->bb_counters[ord]--;
@@ -2094,20 +2097,29 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
if (ret == 0)
ret = len | (ord << 16);
- /* we have to split large buddy */
BUG_ON(ord <= 0);
buddy = mb_find_buddy(e4b, ord, &max);
mb_set_bit(start >> ord, buddy);
e4b->bd_info->bb_counters[ord]--;
- ord--;
- cur = (start >> ord) & ~1U;
- buddy = mb_find_buddy(e4b, ord, &max);
- mb_clear_bit(cur, buddy);
- mb_clear_bit(cur + 1, buddy);
- e4b->bd_info->bb_counters[ord]++;
- e4b->bd_info->bb_counters[ord]++;
- split = true;
+ ord_start = (start >> ord) << ord;
+ ord_end = ord_start + (1 << ord);
+ /* first chunk */
+ if (start > ord_start)
+ ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy,
+ ord_start, start - ord_start,
+ e4b->bd_info);
+
+ /* last chunk */
+ if (start + len < ord_end) {
+ ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy,
+ start + len,
+ ord_end - (start + len),
+ e4b->bd_info);
+ break;
+ }
+ len = start + len - ord_end;
+ start = ord_end;
}
mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
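
The rewrite replaces the order-by-order buddy splitting with a single pass: the covering order-ord chunk [ord_start, ord_end) is marked used, and any unused head [ord_start, start) and tail [start + len, ord_end) are handed back to ext4_mb_mark_free_simple(). As a worked example, with ord = 4, start = 35 and len = 6 the chunk is [32, 48): the head returns [32, 35), the allocation uses [35, 41), and the tail returns [41, 48). A standalone sketch of just the interval arithmetic:

    #include <stdio.h>

    int main(void)
    {
            int ord = 4, start = 35, len = 6;
            int ord_start = (start >> ord) << ord; /* 32: chunk base */
            int ord_end = ord_start + (1 << ord);  /* 48: chunk end  */

            printf("head [%d,%d) used [%d,%d) tail [%d,%d)\n",
                   ord_start, start, start, start + len,
                   start + len, ord_end);
            return 0;
    }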
@@ -2149,10 +2161,10 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
* double allocate blocks. The reference is dropped
* in ext4_mb_release_context
*/
- ac->ac_bitmap_page = e4b->bd_bitmap_page;
- get_page(ac->ac_bitmap_page);
- ac->ac_buddy_page = e4b->bd_buddy_page;
- get_page(ac->ac_buddy_page);
+ ac->ac_bitmap_folio = e4b->bd_bitmap_folio;
+ folio_get(ac->ac_bitmap_folio);
+ ac->ac_buddy_folio = e4b->bd_buddy_folio;
+ folio_get(ac->ac_buddy_folio);
/* store last allocated for subsequent stream allocation */
if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
spin_lock(&sbi->s_md_lock);
@@ -2675,7 +2687,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
int ret;
/*
- * cr=CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
+ * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic
* search to find large good chunks almost for free. If buddy
* data is not ready, then this optimization makes no sense. But
* we never skip the first block group in a flex_bg, since this
@@ -2856,6 +2868,7 @@ repeat:
group = ac->ac_g_ex.fe_group;
ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups;
prefetch_grp = group;
+ nr = 0;
for (i = 0, new_cr = cr; i < ngroups; i++,
ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) {
@@ -3186,7 +3199,6 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
}
static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
-__acquires(&EXT4_SB(sb)->s_mb_rb_lock)
{
struct super_block *sb = pde_data(file_inode(seq->file));
unsigned long position;
@@ -3440,10 +3452,11 @@ static int ext4_mb_init_backend(struct super_block *sb)
}
if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
sbi->s_mb_prefetch = ext4_get_groups_count(sb);
- /* now many real IOs to prefetch within a single allocation at cr=0
- * given cr=0 is an CPU-related optimization we shouldn't try to
- * load too many groups, at some point we should start to use what
- * we've got in memory.
+ /*
+ * how many real IOs to prefetch within a single allocation at
+ * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is a CPU-related
+ * optimization, we shouldn't try to load too many groups; at some point
+ * we should start to use what we've got in memory.
* with an average random access time 5ms, it'd take a second to get
* 200 groups (* N with flex_bg), so let's make this limit 4
*/
@@ -3884,8 +3897,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
/* No more items in the per group rb tree
* balance refcounts from ext4_mb_free_metadata()
*/
- put_page(e4b.bd_buddy_page);
- put_page(e4b.bd_bitmap_page);
+ folio_put(e4b.bd_buddy_folio);
+ folio_put(e4b.bd_bitmap_folio);
}
ext4_unlock_group(sb, entry->efd_group);
ext4_mb_unload_buddy(&e4b);
@@ -5989,10 +6002,10 @@ static void ext4_mb_release_context(struct ext4_allocation_context *ac)
ext4_mb_put_pa(ac, ac->ac_sb, pa);
}
- if (ac->ac_bitmap_page)
- put_page(ac->ac_bitmap_page);
- if (ac->ac_buddy_page)
- put_page(ac->ac_buddy_page);
+ if (ac->ac_bitmap_folio)
+ folio_put(ac->ac_bitmap_folio);
+ if (ac->ac_buddy_folio)
+ folio_put(ac->ac_buddy_folio);
if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
mutex_unlock(&ac->ac_lg->lg_mutex);
ext4_mb_collect_stats(ac);
@@ -6113,6 +6126,7 @@ ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
ext4_mb_mark_bb(sb, block, 1, true);
ar->len = 1;
+ *errp = 0;
return block;
}
@@ -6307,8 +6321,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
struct rb_node *parent = NULL, *new_node;
BUG_ON(!ext4_handle_valid(handle));
- BUG_ON(e4b->bd_bitmap_page == NULL);
- BUG_ON(e4b->bd_buddy_page == NULL);
+ BUG_ON(e4b->bd_bitmap_folio == NULL);
+ BUG_ON(e4b->bd_buddy_folio == NULL);
new_node = &new_entry->efd_node;
cluster = new_entry->efd_start_cluster;
@@ -6319,8 +6333,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
* otherwise we'll refresh it from
* on-disk bitmap and lose not-yet-available
* blocks */
- get_page(e4b->bd_buddy_page);
- get_page(e4b->bd_bitmap_page);
+ folio_get(e4b->bd_buddy_folio);
+ folio_get(e4b->bd_bitmap_folio);
}
while (*n) {
parent = *n;
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 56938532b4ce..d8553f1498d3 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -187,14 +187,14 @@ struct ext4_allocation_context {
struct ext4_free_extent ac_f_ex;
/*
- * goal len can change in CR1.5, so save the original len. This is
- * used while adjusting the PA window and for accounting.
+ * goal len can change in CR_BEST_AVAIL_LEN, so save the original len.
+ * This is used while adjusting the PA window and for accounting.
*/
ext4_grpblk_t ac_orig_goal_len;
__u32 ac_flags; /* allocation hints */
+ __u32 ac_groups_linear_remaining;
__u16 ac_groups_scanned;
- __u16 ac_groups_linear_remaining;
__u16 ac_found;
__u16 ac_cX_found[EXT4_MB_NUM_CRS];
__u16 ac_tail;
@@ -204,8 +204,8 @@ struct ext4_allocation_context {
__u8 ac_2order; /* if request is to allocate 2^N blocks and
* N > 0, the field stores N, otherwise 0 */
__u8 ac_op; /* operation, for history only */
- struct page *ac_bitmap_page;
- struct page *ac_buddy_page;
+ struct folio *ac_bitmap_folio;
+ struct folio *ac_buddy_folio;
struct ext4_prealloc_space *ac_pa;
struct ext4_locality_group *ac_lg;
};
@@ -215,9 +215,9 @@ struct ext4_allocation_context {
#define AC_STATUS_BREAK 3
struct ext4_buddy {
- struct page *bd_buddy_page;
+ struct folio *bd_buddy_folio;
void *bd_buddy;
- struct page *bd_bitmap_page;
+ struct folio *bd_bitmap_folio;
void *bd_bitmap;
struct ext4_group_info *bd_info;
struct super_block *bd_sb;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 7cd4afa4de1d..204f53b23622 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -199,10 +199,8 @@ mext_page_mkuptodate(struct folio *folio, unsigned from, unsigned to)
continue;
if (!buffer_mapped(bh)) {
err = ext4_get_block(inode, block, bh, 0);
- if (err) {
- folio_set_error(folio);
+ if (err)
return err;
- }
if (!buffer_mapped(bh)) {
folio_zero_range(folio, block_start, blocksize);
set_buffer_uptodate(bh);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 5e4f65c14dfb..a630b27a4cc6 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2897,7 +2897,7 @@ retry:
inode = ext4_new_inode_start_handle(idmap, dir, mode,
NULL, 0, NULL,
EXT4_HT_DIR,
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
+ EXT4_MAXQUOTAS_TRANS_BLOCKS(dir->i_sb) +
4 + EXT4_XATTR_TRANS_BLOCKS);
handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 312bc6813357..ad5543866d21 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -117,7 +117,6 @@ static void ext4_finish_bio(struct bio *bio)
if (bio->bi_status) {
int err = blk_status_to_errno(bio->bi_status);
- folio_set_error(folio);
mapping_set_error(folio->mapping, err);
}
bh = head = folio_buffers(folio);
@@ -441,8 +440,6 @@ int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
BUG_ON(!folio_test_locked(folio));
BUG_ON(folio_test_writeback(folio));
- folio_clear_error(folio);
-
/*
* Comments copied from block_write_full_folio:
*
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 21e8f0aebb3c..8494492582ab 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -289,7 +289,6 @@ int ext4_mpage_readpages(struct inode *inode,
if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
set_error_page:
- folio_set_error(folio);
folio_zero_segment(folio, 0,
folio_size(folio));
folio_unlock(folio);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cfb8449c731f..893ab80dafba 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1723,10 +1723,6 @@ static const struct constant_table ext4_param_dax[] = {
{}
};
-/* String parameter that allows empty argument */
-#define fsparam_string_empty(NAME, OPT) \
- __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
-
/*
* Mount option specification
* We don't use fsparam_flag_no because of the way we set the
@@ -2078,8 +2074,7 @@ static int unnote_qf_name(struct fs_context *fc, int qtype)
{
struct ext4_fs_context *ctx = fc->fs_private;
- if (ctx->s_qf_names[qtype])
- kfree(ctx->s_qf_names[qtype]);
+ kfree(ctx->s_qf_names[qtype]);
ctx->s_qf_names[qtype] = NULL;
ctx->qname_spec |= 1 << qtype;
@@ -2484,8 +2479,7 @@ static int parse_options(struct fs_context *fc, char *options)
param.size = v_len;
ret = ext4_parse_param(fc, &param);
- if (param.string)
- kfree(param.string);
+ kfree(param.string);
if (ret < 0)
return ret;
}
@@ -5342,6 +5336,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid));
+ super_set_sysfs_name_bdev(sb);
INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
mutex_init(&sbi->s_orphan_lock);
@@ -5551,19 +5546,15 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
if (err)
goto failed_mount6;
- err = ext4_register_sysfs(sb);
- if (err)
- goto failed_mount7;
-
err = ext4_init_orphan_info(sb);
if (err)
- goto failed_mount8;
+ goto failed_mount7;
#ifdef CONFIG_QUOTA
/* Enable quota usage during mount. */
if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
err = ext4_enable_quotas(sb);
if (err)
- goto failed_mount9;
+ goto failed_mount8;
}
#endif /* CONFIG_QUOTA */
@@ -5589,7 +5580,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
ext4_msg(sb, KERN_INFO, "recovery complete");
err = ext4_mark_recovery_complete(sb, es);
if (err)
- goto failed_mount10;
+ goto failed_mount9;
}
if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
@@ -5606,15 +5597,17 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
atomic_set(&sbi->s_warning_count, 0);
atomic_set(&sbi->s_msg_count, 0);
+ /* Register sysfs after all initializations are complete. */
+ err = ext4_register_sysfs(sb);
+ if (err)
+ goto failed_mount9;
+
return 0;
-failed_mount10:
+failed_mount9:
ext4_quotas_off(sb, EXT4_MAXQUOTAS);
-failed_mount9: __maybe_unused
+failed_mount8: __maybe_unused
ext4_release_orphan_info(sb);
-failed_mount8:
- ext4_unregister_sysfs(sb);
- kobject_put(&sbi->s_kobj);
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
@@ -5668,7 +5661,7 @@ failed_mount:
brelse(sbi->s_sbh);
if (sbi->s_journal_bdev_file) {
invalidate_bdev(file_bdev(sbi->s_journal_bdev_file));
- fput(sbi->s_journal_bdev_file);
+ bdev_fput(sbi->s_journal_bdev_file);
}
out_fail:
invalidate_bdev(sb->s_bdev);
@@ -5913,7 +5906,7 @@ static struct file *ext4_get_journal_blkdev(struct super_block *sb,
out_bh:
brelse(bh);
out_bdev:
- fput(bdev_file);
+ bdev_fput(bdev_file);
return ERR_PTR(errno);
}
@@ -5952,7 +5945,7 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
out_journal:
jbd2_journal_destroy(journal);
out_bdev:
- fput(bdev_file);
+ bdev_fput(bdev_file);
return ERR_PTR(errno);
}
@@ -6130,8 +6123,8 @@ static void ext4_update_super(struct super_block *sb)
__ext4_update_tstamp(&es->s_first_error_time,
&es->s_first_error_time_hi,
sbi->s_first_error_time);
- strncpy(es->s_first_error_func, sbi->s_first_error_func,
- sizeof(es->s_first_error_func));
+ strtomem_pad(es->s_first_error_func,
+ sbi->s_first_error_func, 0);
es->s_first_error_line =
cpu_to_le32(sbi->s_first_error_line);
es->s_first_error_ino =
@@ -6144,8 +6137,7 @@ static void ext4_update_super(struct super_block *sb)
__ext4_update_tstamp(&es->s_last_error_time,
&es->s_last_error_time_hi,
sbi->s_last_error_time);
- strncpy(es->s_last_error_func, sbi->s_last_error_func,
- sizeof(es->s_last_error_func));
+ strtomem_pad(es->s_last_error_func, sbi->s_last_error_func, 0);
es->s_last_error_line = cpu_to_le32(sbi->s_last_error_line);
es->s_last_error_ino = cpu_to_le32(sbi->s_last_error_ino);
es->s_last_error_block = cpu_to_le64(sbi->s_last_error_block);
@@ -7327,7 +7319,7 @@ static void ext4_kill_sb(struct super_block *sb)
kill_block_super(sb);
if (bdev_file)
- fput(bdev_file);
+ bdev_fput(bdev_file);
}
static struct file_system_type ext4_fs_type = {
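
The fput() to bdev_fput() conversions in this file (and in f2fs below) match block-device files opened with bdev_file_open_by_dev()/bdev_file_open_by_path() with their dedicated release helper, which, as I understand it, also synchronizes the holder teardown before the final reference drop. The intended pairing, sketched with placeholder path and holder arguments:

    struct file *bdev_file;

    bdev_file = bdev_file_open_by_path("/dev/sdb1", /* placeholder path */
                                       BLK_OPEN_READ | BLK_OPEN_WRITE,
                                       holder, holder_ops);
    if (IS_ERR(bdev_file))
            return PTR_ERR(bdev_file);
    /* ... use file_bdev(bdev_file) ... */
    bdev_fput(bdev_file); /* not plain fput() */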
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 6d332dff79dd..ddb54608ca2e 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -29,7 +29,10 @@ typedef enum {
attr_trigger_test_error,
attr_first_error_time,
attr_last_error_time,
+ attr_clusters_in_group,
+ attr_mb_order,
attr_feature,
+ attr_pointer_pi,
attr_pointer_ui,
attr_pointer_ul,
attr_pointer_u64,
@@ -104,7 +107,7 @@ static ssize_t reserved_clusters_store(struct ext4_sb_info *sbi,
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
- if (ret || val >= clusters)
+ if (ret || val >= clusters || (s64)val < 0)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
@@ -178,6 +181,9 @@ static struct ext4_attr ext4_attr_##_name = { \
#define EXT4_RO_ATTR_ES_STRING(_name,_elname,_size) \
EXT4_ATTR_STRING(_name, 0444, _size, ext4_super_block, _elname)
+#define EXT4_RW_ATTR_SBI_PI(_name,_elname) \
+ EXT4_ATTR_OFFSET(_name, 0644, pointer_pi, ext4_sb_info, _elname)
+
#define EXT4_RW_ATTR_SBI_UI(_name,_elname) \
EXT4_ATTR_OFFSET(_name, 0644, pointer_ui, ext4_sb_info, _elname)
@@ -207,23 +213,25 @@ EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
ext4_sb_info, s_inode_readahead_blks);
+EXT4_ATTR_OFFSET(mb_group_prealloc, 0644, clusters_in_group,
+ ext4_sb_info, s_mb_group_prealloc);
+EXT4_ATTR_OFFSET(mb_best_avail_max_trim_order, 0644, mb_order,
+ ext4_sb_info, s_mb_best_avail_max_trim_order);
EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
-EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups);
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
-EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
-EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst);
-EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
-EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
-EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
-EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
-EXT4_RW_ATTR_SBI_UI(mb_best_avail_max_trim_order, s_mb_best_avail_max_trim_order);
+EXT4_RW_ATTR_SBI_PI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_PI(err_ratelimit_burst, s_err_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_PI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_PI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_PI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_PI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
#ifdef CONFIG_EXT4_DEBUG
EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail);
#endif
@@ -366,13 +374,45 @@ static ssize_t __print_tstamp(char *buf, __le32 lo, __u8 hi)
#define print_tstamp(buf, es, tstamp) \
__print_tstamp(buf, (es)->tstamp, (es)->tstamp ## _hi)
+static ssize_t ext4_generic_attr_show(struct ext4_attr *a,
+ struct ext4_sb_info *sbi, char *buf)
+{
+ void *ptr = calc_ptr(a, sbi);
+
+ if (!ptr)
+ return 0;
+
+ switch (a->attr_id) {
+ case attr_inode_readahead:
+ case attr_clusters_in_group:
+ case attr_mb_order:
+ case attr_pointer_pi:
+ case attr_pointer_ui:
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return sysfs_emit(buf, "%u\n", le32_to_cpup(ptr));
+ return sysfs_emit(buf, "%u\n", *((unsigned int *) ptr));
+ case attr_pointer_ul:
+ return sysfs_emit(buf, "%lu\n", *((unsigned long *) ptr));
+ case attr_pointer_u8:
+ return sysfs_emit(buf, "%u\n", *((unsigned char *) ptr));
+ case attr_pointer_u64:
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return sysfs_emit(buf, "%llu\n", le64_to_cpup(ptr));
+ return sysfs_emit(buf, "%llu\n", *((unsigned long long *) ptr));
+ case attr_pointer_string:
+ return sysfs_emit(buf, "%.*s\n", a->attr_size, (char *) ptr);
+ case attr_pointer_atomic:
+ return sysfs_emit(buf, "%d\n", atomic_read((atomic_t *) ptr));
+ }
+ return 0;
+}
+
static ssize_t ext4_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
s_kobj);
struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
- void *ptr = calc_ptr(a, sbi);
switch (a->attr_id) {
case attr_delayed_allocation_blocks:
@@ -391,45 +431,6 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
return sysfs_emit(buf, "%llu\n",
(unsigned long long)
percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
- case attr_inode_readahead:
- case attr_pointer_ui:
- if (!ptr)
- return 0;
- if (a->attr_ptr == ptr_ext4_super_block_offset)
- return sysfs_emit(buf, "%u\n",
- le32_to_cpup(ptr));
- else
- return sysfs_emit(buf, "%u\n",
- *((unsigned int *) ptr));
- case attr_pointer_ul:
- if (!ptr)
- return 0;
- return sysfs_emit(buf, "%lu\n",
- *((unsigned long *) ptr));
- case attr_pointer_u8:
- if (!ptr)
- return 0;
- return sysfs_emit(buf, "%u\n",
- *((unsigned char *) ptr));
- case attr_pointer_u64:
- if (!ptr)
- return 0;
- if (a->attr_ptr == ptr_ext4_super_block_offset)
- return sysfs_emit(buf, "%llu\n",
- le64_to_cpup(ptr));
- else
- return sysfs_emit(buf, "%llu\n",
- *((unsigned long long *) ptr));
- case attr_pointer_string:
- if (!ptr)
- return 0;
- return sysfs_emit(buf, "%.*s\n", a->attr_size,
- (char *) ptr);
- case attr_pointer_atomic:
- if (!ptr)
- return 0;
- return sysfs_emit(buf, "%d\n",
- atomic_read((atomic_t *) ptr));
case attr_feature:
return sysfs_emit(buf, "supported\n");
case attr_first_error_time:
@@ -438,29 +439,34 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
return print_tstamp(buf, sbi->s_es, s_last_error_time);
case attr_journal_task:
return journal_task_show(sbi, buf);
+ default:
+ return ext4_generic_attr_show(a, sbi, buf);
}
-
- return 0;
}
-static ssize_t ext4_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf, size_t len)
+static ssize_t ext4_generic_attr_store(struct ext4_attr *a,
+ struct ext4_sb_info *sbi,
+ const char *buf, size_t len)
{
- struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
- s_kobj);
- struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
- void *ptr = calc_ptr(a, sbi);
- unsigned long t;
int ret;
+ unsigned int t;
+ unsigned long lt;
+ void *ptr = calc_ptr(a, sbi);
+
+ if (!ptr)
+ return 0;
switch (a->attr_id) {
- case attr_reserved_clusters:
- return reserved_clusters_store(sbi, buf, len);
+ case attr_pointer_pi:
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if ((int)t < 0)
+ return -EINVAL;
+ *((unsigned int *) ptr) = t;
+ return len;
case attr_pointer_ui:
- if (!ptr)
- return 0;
- ret = kstrtoul(skip_spaces(buf), 0, &t);
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
if (ret)
return ret;
if (a->attr_ptr == ptr_ext4_super_block_offset)
@@ -468,20 +474,50 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
else
*((unsigned int *) ptr) = t;
return len;
+ case attr_mb_order:
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (t > 64)
+ return -EINVAL;
+ *((unsigned int *) ptr) = t;
+ return len;
+ case attr_clusters_in_group:
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (t > sbi->s_clusters_per_group)
+ return -EINVAL;
+ *((unsigned int *) ptr) = t;
+ return len;
case attr_pointer_ul:
- if (!ptr)
- return 0;
- ret = kstrtoul(skip_spaces(buf), 0, &t);
+ ret = kstrtoul(skip_spaces(buf), 0, &lt);
if (ret)
return ret;
- *((unsigned long *) ptr) = t;
+ *((unsigned long *) ptr) = lt;
return len;
+ }
+ return 0;
+}
+
+static ssize_t ext4_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
+ s_kobj);
+ struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
+
+ switch (a->attr_id) {
+ case attr_reserved_clusters:
+ return reserved_clusters_store(sbi, buf, len);
case attr_inode_readahead:
return inode_readahead_blks_store(sbi, buf, len);
case attr_trigger_test_error:
return trigger_test_error(sbi, buf, len);
+ default:
+ return ext4_generic_attr_store(a, sbi, buf, len);
}
- return 0;
}
static void ext4_sb_release(struct kobject *kobj)
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index b67a176bfcf9..6460879b9fcb 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1619,6 +1619,7 @@ out_err:
static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
struct ext4_xattr_search *s,
handle_t *handle, struct inode *inode,
+ struct inode *new_ea_inode,
bool is_block)
{
struct ext4_xattr_entry *last, *next;
@@ -1626,7 +1627,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
size_t min_offs = s->end - s->base, name_len = strlen(i->name);
int in_inode = i->in_inode;
struct inode *old_ea_inode = NULL;
- struct inode *new_ea_inode = NULL;
size_t old_size, new_size;
int ret;
@@ -1711,38 +1711,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
old_ea_inode = NULL;
goto out;
}
- }
- if (i->value && in_inode) {
- WARN_ON_ONCE(!i->value_len);
-
- new_ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
- i->value, i->value_len);
- if (IS_ERR(new_ea_inode)) {
- ret = PTR_ERR(new_ea_inode);
- new_ea_inode = NULL;
- goto out;
- }
- }
- if (old_ea_inode) {
/* We are ready to release ref count on the old_ea_inode. */
ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
- if (ret) {
- /* Release newly required ref count on new_ea_inode. */
- if (new_ea_inode) {
- int err;
-
- err = ext4_xattr_inode_dec_ref(handle,
- new_ea_inode);
- if (err)
- ext4_warning_inode(new_ea_inode,
- "dec ref new_ea_inode err=%d",
- err);
- ext4_xattr_inode_free_quota(inode, new_ea_inode,
- i->value_len);
- }
+ if (ret)
goto out;
- }
ext4_xattr_inode_free_quota(inode, old_ea_inode,
le32_to_cpu(here->e_value_size));
@@ -1866,7 +1839,6 @@ update_hash:
ret = 0;
out:
iput(old_ea_inode);
- iput(new_ea_inode);
return ret;
}
@@ -1929,9 +1901,21 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
size_t old_ea_inode_quota = 0;
unsigned int ea_ino;
-
#define header(x) ((struct ext4_xattr_header *)(x))
+ /* If we need EA inode, prepare it before locking the buffer */
+ if (i->value && i->in_inode) {
+ WARN_ON_ONCE(!i->value_len);
+
+ ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+ i->value, i->value_len);
+ if (IS_ERR(ea_inode)) {
+ error = PTR_ERR(ea_inode);
+ ea_inode = NULL;
+ goto cleanup;
+ }
+ }
+
if (s->base) {
int offset = (char *)s->here - bs->bh->b_data;
@@ -1940,6 +1924,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
EXT4_JTR_NONE);
if (error)
goto cleanup;
+
lock_buffer(bs->bh);
if (header(s->base)->h_refcount == cpu_to_le32(1)) {
@@ -1966,7 +1951,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
}
ea_bdebug(bs->bh, "modifying in-place");
error = ext4_xattr_set_entry(i, s, handle, inode,
- true /* is_block */);
+ ea_inode, true /* is_block */);
ext4_xattr_block_csum_set(inode, bs->bh);
unlock_buffer(bs->bh);
if (error == -EFSCORRUPTED)
@@ -2034,33 +2019,22 @@ clone_block:
s->end = s->base + sb->s_blocksize;
}
- error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
+ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
+ true /* is_block */);
if (error == -EFSCORRUPTED)
goto bad_block;
if (error)
goto cleanup;
- if (i->value && s->here->e_value_inum) {
- /*
- * A ref count on ea_inode has been taken as part of the call to
- * ext4_xattr_set_entry() above. We would like to drop this
- * extra ref but we have to wait until the xattr block is
- * initialized and has its own ref count on the ea_inode.
- */
- ea_ino = le32_to_cpu(s->here->e_value_inum);
- error = ext4_xattr_inode_iget(inode, ea_ino,
- le32_to_cpu(s->here->e_hash),
- &ea_inode);
- if (error) {
- ea_inode = NULL;
+inserted:
+ if (!IS_LAST_ENTRY(s->first)) {
+ new_bh = ext4_xattr_block_cache_find(inode, header(s->base), &ce);
+ if (IS_ERR(new_bh)) {
+ error = PTR_ERR(new_bh);
+ new_bh = NULL;
goto cleanup;
}
- }
-inserted:
- if (!IS_LAST_ENTRY(s->first)) {
- new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
- &ce);
if (new_bh) {
/* We found an identical block in the cache. */
if (new_bh == bs->bh)
@@ -2158,6 +2132,17 @@ getblk_failed:
ENTRY(header(s->base)+1));
if (error)
goto getblk_failed;
+ if (ea_inode) {
+ /* Drop the extra ref on ea_inode. */
+ error = ext4_xattr_inode_dec_ref(handle,
+ ea_inode);
+ if (error)
+ ext4_warning_inode(ea_inode,
+ "dec ref error=%d",
+ error);
+ iput(ea_inode);
+ ea_inode = NULL;
+ }
lock_buffer(new_bh);
error = ext4_journal_get_create_access(handle, sb,
@@ -2198,17 +2183,16 @@ getblk_failed:
cleanup:
if (ea_inode) {
- int error2;
-
- error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
- if (error2)
- ext4_warning_inode(ea_inode, "dec ref error=%d",
- error2);
+ if (error) {
+ int error2;
- /* If there was an error, revert the quota charge. */
- if (error)
+ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+ if (error2)
+ ext4_warning_inode(ea_inode, "dec ref error=%d",
+ error2);
ext4_xattr_inode_free_quota(inode, ea_inode,
i_size_read(ea_inode));
+ }
iput(ea_inode);
}
if (ce)
@@ -2266,14 +2250,38 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
{
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_search *s = &is->s;
+ struct inode *ea_inode = NULL;
int error;
if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
- if (error)
+ /* If we need EA inode, prepare it before locking the buffer */
+ if (i->value && i->in_inode) {
+ WARN_ON_ONCE(!i->value_len);
+
+ ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+ i->value, i->value_len);
+ if (IS_ERR(ea_inode))
+ return PTR_ERR(ea_inode);
+ }
+ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
+ false /* is_block */);
+ if (error) {
+ if (ea_inode) {
+ int error2;
+
+ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+ if (error2)
+ ext4_warning_inode(ea_inode, "dec ref error=%d",
+ error2);
+
+ ext4_xattr_inode_free_quota(inode, ea_inode,
+ i_size_read(ea_inode));
+ iput(ea_inode);
+ }
return error;
+ }
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2282,6 +2290,7 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
header->h_magic = cpu_to_le32(0);
ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
}
+ iput(ea_inode);
return 0;
}
@@ -3090,8 +3099,8 @@ ext4_xattr_cmp(struct ext4_xattr_header *header1,
*
* Find an identical extended attribute block.
*
- * Returns a pointer to the block found, or NULL if such a block was
- * not found or an error occurred.
+ * Returns a pointer to the block found, or NULL if such a block was not
+ * found, or an error pointer if an error occurred while reading the EA block.
*/
static struct buffer_head *
ext4_xattr_block_cache_find(struct inode *inode,
@@ -3113,11 +3122,11 @@ ext4_xattr_block_cache_find(struct inode *inode,
bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
if (IS_ERR(bh)) {
- if (PTR_ERR(bh) == -ENOMEM)
- return NULL;
- bh = NULL;
- EXT4_ERROR_INODE(inode, "block %lu read error",
- (unsigned long)ce->e_value);
+ if (PTR_ERR(bh) != -ENOMEM)
+ EXT4_ERROR_INODE(inode, "block %lu read error",
+ (unsigned long)ce->e_value);
+ mb_cache_entry_put(ea_block_cache, ce);
+ return bh;
} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
*pce = ce;
return bh;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index d9494b5fc7c1..961e6ff77c72 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -4082,11 +4082,12 @@ const struct address_space_operations f2fs_dblock_aops = {
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
- struct address_space *mapping = page_mapping(page);
+ struct folio *folio = page_folio(page);
+ struct address_space *mapping = folio->mapping;
unsigned long flags;
xa_lock_irqsave(&mapping->i_pages, flags);
- __xa_clear_mark(&mapping->i_pages, page_index(page),
+ __xa_clear_mark(&mapping->i_pages, folio->index,
PAGECACHE_TAG_DIRTY);
xa_unlock_irqrestore(&mapping->i_pages, flags);
}
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 1761ad125f97..2b65e09822d4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -569,7 +569,7 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
if (err)
return err;
- filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
+ filp->f_mode |= FMODE_NOWAIT;
filp->f_mode |= FMODE_CAN_ODIRECT;
return dquot_file_open(inode, filp);
@@ -5045,4 +5045,5 @@ const struct file_operations f2fs_file_operations = {
.splice_read = f2fs_file_splice_read,
.splice_write = iter_file_splice_write,
.fadvise = f2fs_file_fadvise,
+ .fop_flags = FOP_BUFFER_RASYNC,
};
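
FMODE_BUF_RASYNC was a per-file f_mode bit that every ->open() had to set; it moves to the shared file_operations as FOP_BUFFER_RASYNC, declared once per fops table. The shape of the change, with hypothetical demo_ handlers:

    static const struct file_operations demo_fops = {
            .read_iter  = demo_read_iter,
            .write_iter = demo_write_iter,
            /* was: filp->f_mode |= FMODE_BUF_RASYNC; in ->open() */
            .fop_flags  = FOP_BUFFER_RASYNC,
    };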
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a6867f26f141..a4bc26dfdb1a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1558,7 +1558,7 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
for (i = 0; i < sbi->s_ndevs; i++) {
if (i > 0)
- fput(FDEV(i).bdev_file);
+ bdev_fput(FDEV(i).bdev_file);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index 00235b8a1823..acbec5bdd521 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -269,6 +269,18 @@ enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME, PARSE_EOF, };
/**
* fat_parse_long - Parse extended directory entry.
*
+ * @dir: Pointer to the inode that represents the directory.
+ * @pos: On input, contains the starting position to read from.
+ * On output, updated with the new position.
+ * @bh: Pointer to the buffer head that may be used for reading directory
+ * entries. May be updated.
+ * @de: On input, points to the current directory entry.
+ * On output, points to the next directory entry.
+ * @unicode: Pointer to a buffer where the parsed Unicode long filename will be
+ * stored.
+ * @nr_slots: Pointer to a variable that will store the number of longname
+ * slots found.
+ *
* This function returns zero on success, negative value on error, or one of
* the following:
*
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 54cc85d3338e..300e5d9ad913 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -327,6 +327,22 @@ static long fcntl_set_rw_hint(struct file *file, unsigned int cmd,
return 0;
}
+/* Is the file descriptor a dup of the file? */
+static long f_dupfd_query(int fd, struct file *filp)
+{
+ CLASS(fd_raw, f)(fd);
+
+ /*
+ * We can do the 'fdput()' immediately, as the only thing that
+ * matters is the pointer value which isn't changed by the fdput.
+ *
+ * Technically we didn't need a ref at all, and 'fdget()' was
+ * overkill, but given our lockless file pointer lookup, the
+ * alternatives are complicated.
+ */
+ return f.file == filp;
+}
+
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
struct file *filp)
{
@@ -342,6 +358,9 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
case F_DUPFD_CLOEXEC:
err = f_dupfd(argi, filp, O_CLOEXEC);
break;
+ case F_DUPFD_QUERY:
+ err = f_dupfd_query(argi, filp);
+ break;
case F_GETFD:
err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
break;
@@ -446,6 +465,7 @@ static int check_fcntl_cmd(unsigned cmd)
switch (cmd) {
case F_DUPFD:
case F_DUPFD_CLOEXEC:
+ case F_DUPFD_QUERY:
case F_GETFD:
case F_SETFD:
case F_GETFL:
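
From userspace, F_DUPFD_QUERY asks whether the third argument is a descriptor for the same open file description as the fcntl'd fd, returning 1 for a dup() pair and 0 otherwise. A small demo, assuming headers new enough to define F_DUPFD_QUERY (kernels without this patch fail the call with EINVAL):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int a = open("/dev/null", O_RDONLY);
            int b = dup(a);                      /* shares a's description */
            int c = open("/dev/null", O_RDONLY); /* separate description   */

            printf("a vs b: %d\n", fcntl(a, F_DUPFD_QUERY, b)); /* 1 */
            printf("a vs c: %d\n", fcntl(a, F_DUPFD_QUERY, c)); /* 0 */
            return 0;
    }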
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 57a12614addf..8a7f86c2139a 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -36,7 +36,7 @@ static long do_sys_name_to_handle(const struct path *path,
if (f_handle.handle_bytes > MAX_HANDLE_SZ)
return -EINVAL;
- handle = kzalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+ handle = kzalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
GFP_KERNEL);
if (!handle)
return -ENOMEM;
@@ -71,7 +71,7 @@ static long do_sys_name_to_handle(const struct path *path,
/* copy the mount id */
if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
copy_to_user(ufh, handle,
- sizeof(struct file_handle) + handle_bytes))
+ struct_size(handle, f_handle, handle_bytes)))
retval = -EFAULT;
kfree(handle);
return retval;
@@ -192,7 +192,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
retval = -EINVAL;
goto out_err;
}
- handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+ handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes),
GFP_KERNEL);
if (!handle) {
retval = -ENOMEM;
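
struct_size(p, member, n) computes the size of a structure that ends in a flexible array member, and in the kernel it additionally saturates instead of wrapping on arithmetic overflow, which the open-coded sizeof + multiplication silently permits. A userspace approximation; the demo macro deliberately omits the saturation:

    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-in for the kernel's struct_size(): no overflow
     * saturation; p is used only inside sizeof, never dereferenced. */
    #define demo_struct_size(p, member, n) \
            (sizeof(*(p)) + sizeof(*(p)->member) * (size_t)(n))

    struct demo_handle {
            unsigned int handle_bytes;
            int handle_type;
            unsigned char f_handle[]; /* flexible array member */
    };

    int main(void)
    {
            unsigned int n = 16;
            struct demo_handle *h = malloc(demo_struct_size(h, f_handle, n));

            if (!h)
                    return 1;
            h->handle_bytes = n;
            memset(h->f_handle, 0, n);
            free(h);
            return 0;
    }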
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 42e03b6b1cc7..fabe60778658 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -17,7 +17,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/vfs.h>
-#include <linux/mount.h>
+#include <linux/fs_context.h>
#include "vxfs.h"
#include "vxfs_extern.h"
@@ -91,10 +91,10 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
return 0;
}
-static int vxfs_remount(struct super_block *sb, int *flags, char *data)
+static int vxfs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- *flags |= SB_RDONLY;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_RDONLY;
return 0;
}
@@ -120,24 +120,24 @@ static const struct super_operations vxfs_super_ops = {
.evict_inode = vxfs_evict_inode,
.put_super = vxfs_put_super,
.statfs = vxfs_statfs,
- .remount_fs = vxfs_remount,
};
-static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
+static int vxfs_try_sb_magic(struct super_block *sbp, struct fs_context *fc,
unsigned blk, __fs32 magic)
{
struct buffer_head *bp;
struct vxfs_sb *rsbp;
struct vxfs_sb_info *infp = VXFS_SBI(sbp);
+ int silent = fc->sb_flags & SB_SILENT;
int rc = -ENOMEM;
bp = sb_bread(sbp, blk);
do {
if (!bp || !buffer_mapped(bp)) {
if (!silent) {
- printk(KERN_WARNING
- "vxfs: unable to read disk superblock at %u\n",
- blk);
+ warnf(fc,
+ "vxfs: unable to read disk superblock at %u",
+ blk);
}
break;
}
@@ -146,9 +146,9 @@ static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
rsbp = (struct vxfs_sb *)bp->b_data;
if (rsbp->vs_magic != magic) {
if (!silent)
- printk(KERN_NOTICE
- "vxfs: WRONG superblock magic %08x at %u\n",
- rsbp->vs_magic, blk);
+ infof(fc,
+ "vxfs: WRONG superblock magic %08x at %u",
+ rsbp->vs_magic, blk);
break;
}
@@ -169,8 +169,7 @@ static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
/**
* vxfs_fill_super - read superblock into memory and initialize filesystem
* @sbp: VFS superblock (to fill)
- * @dp: fs private mount data
- * @silent: do not complain loudly when sth is wrong
+ * @fc: filesystem context
*
* Description:
* We are called on the first mount of a filesystem to read the
@@ -182,26 +181,27 @@ static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
* Locking:
* We are under @sbp->s_lock.
*/
-static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
+static int vxfs_fill_super(struct super_block *sbp, struct fs_context *fc)
{
struct vxfs_sb_info *infp;
struct vxfs_sb *rsbp;
u_long bsize;
struct inode *root;
int ret = -EINVAL;
+ int silent = fc->sb_flags & SB_SILENT;
u32 j;
sbp->s_flags |= SB_RDONLY;
infp = kzalloc(sizeof(*infp), GFP_KERNEL);
if (!infp) {
- printk(KERN_WARNING "vxfs: unable to allocate incore superblock\n");
+ warnf(fc, "vxfs: unable to allocate incore superblock");
return -ENOMEM;
}
bsize = sb_min_blocksize(sbp, BLOCK_SIZE);
if (!bsize) {
- printk(KERN_WARNING "vxfs: unable to set blocksize\n");
+ warnf(fc, "vxfs: unable to set blocksize");
goto out;
}
@@ -210,24 +210,24 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
sbp->s_time_min = 0;
sbp->s_time_max = U32_MAX;
- if (!vxfs_try_sb_magic(sbp, silent, 1,
+ if (!vxfs_try_sb_magic(sbp, fc, 1,
(__force __fs32)cpu_to_le32(VXFS_SUPER_MAGIC))) {
/* Unixware, x86 */
infp->byte_order = VXFS_BO_LE;
- } else if (!vxfs_try_sb_magic(sbp, silent, 8,
+ } else if (!vxfs_try_sb_magic(sbp, fc, 8,
(__force __fs32)cpu_to_be32(VXFS_SUPER_MAGIC))) {
/* HP-UX, parisc */
infp->byte_order = VXFS_BO_BE;
} else {
if (!silent)
- printk(KERN_NOTICE "vxfs: can't find superblock.\n");
+ infof(fc, "vxfs: can't find superblock.");
goto out;
}
rsbp = infp->vsi_raw;
j = fs32_to_cpu(infp, rsbp->vs_version);
if ((j < 2 || j > 4) && !silent) {
- printk(KERN_NOTICE "vxfs: unsupported VxFS version (%d)\n", j);
+ infof(fc, "vxfs: unsupported VxFS version (%d)", j);
goto out;
}
@@ -244,17 +244,17 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
j = fs32_to_cpu(infp, rsbp->vs_bsize);
if (!sb_set_blocksize(sbp, j)) {
- printk(KERN_WARNING "vxfs: unable to set final block size\n");
+ warnf(fc, "vxfs: unable to set final block size");
goto out;
}
if (vxfs_read_olt(sbp, bsize)) {
- printk(KERN_WARNING "vxfs: unable to read olt\n");
+ warnf(fc, "vxfs: unable to read olt");
goto out;
}
if (vxfs_read_fshead(sbp)) {
- printk(KERN_WARNING "vxfs: unable to read fshead\n");
+ warnf(fc, "vxfs: unable to read fshead");
goto out;
}
@@ -265,7 +265,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
}
sbp->s_root = d_make_root(root);
if (!sbp->s_root) {
- printk(KERN_WARNING "vxfs: unable to get root dentry.\n");
+ warnf(fc, "vxfs: unable to get root dentry.");
goto out_free_ilist;
}
@@ -284,18 +284,29 @@ out:
/*
* The usual module blurb.
*/
-static struct dentry *vxfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int vxfs_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, vxfs_fill_super);
+ return get_tree_bdev(fc, vxfs_fill_super);
+}
+
+static const struct fs_context_operations vxfs_context_ops = {
+ .get_tree = vxfs_get_tree,
+ .reconfigure = vxfs_reconfigure,
+};
+
+static int vxfs_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &vxfs_context_ops;
+
+ return 0;
}
static struct file_system_type vxfs_fs_type = {
.owner = THIS_MODULE,
.name = "vxfs",
- .mount = vxfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = vxfs_init_fs_context,
};
MODULE_ALIAS_FS("vxfs"); /* makes mount -t vxfs autoload the module */
MODULE_ALIAS("vxfs");
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e4f17c53ddfc..92a5b8283528 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -166,8 +166,7 @@ static void wb_wakeup_delayed(struct bdi_writeback *wb)
spin_unlock_irq(&wb->work_lock);
}
-static void finish_writeback_work(struct bdi_writeback *wb,
- struct wb_writeback_work *work)
+static void finish_writeback_work(struct wb_writeback_work *work)
{
struct wb_completion *done = work->done;
@@ -196,7 +195,7 @@ static void wb_queue_work(struct bdi_writeback *wb,
list_add_tail(&work->list, &wb->work_list);
mod_delayed_work(bdi_wq, &wb->dwork, 0);
} else
- finish_writeback_work(wb, work);
+ finish_writeback_work(work);
spin_unlock_irq(&wb->work_lock);
}
@@ -1561,7 +1560,8 @@ static void inode_sleep_on_writeback(struct inode *inode)
* thread's back can have unexpected consequences.
*/
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
- struct writeback_control *wbc)
+ struct writeback_control *wbc,
+ unsigned long dirtied_before)
{
if (inode->i_state & I_FREEING)
return;
@@ -1594,7 +1594,8 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
* We didn't write back all the pages. nfs_writepages()
* sometimes bails out without doing anything.
*/
- if (wbc->nr_to_write <= 0) {
+ if (wbc->nr_to_write <= 0 &&
+ !inode_dirtied_after(inode, dirtied_before)) {
/* Slice used up. Queue for next turn. */
requeue_io(inode, wb);
} else {
@@ -1862,6 +1863,11 @@ static long writeback_sb_inodes(struct super_block *sb,
unsigned long start_time = jiffies;
long write_chunk;
long total_wrote = 0; /* count both pages and inodes */
+ unsigned long dirtied_before = jiffies;
+
+ if (work->for_kupdate)
+ dirtied_before = jiffies -
+ msecs_to_jiffies(dirty_expire_interval * 10);
while (!list_empty(&wb->b_io)) {
struct inode *inode = wb_inode(wb->b_io.prev);
@@ -1967,7 +1973,7 @@ static long writeback_sb_inodes(struct super_block *sb,
spin_lock(&inode->i_lock);
if (!(inode->i_state & I_DIRTY_ALL))
total_wrote++;
- requeue_inode(inode, tmp_wb, &wbc);
+ requeue_inode(inode, tmp_wb, &wbc, dirtied_before);
inode_sync_complete(inode);
spin_unlock(&inode->i_lock);
@@ -2069,6 +2075,7 @@ static long wb_writeback(struct bdi_writeback *wb,
struct inode *inode;
long progress;
struct blk_plug plug;
+ bool queued = false;
blk_start_plug(&plug);
for (;;) {
@@ -2098,21 +2105,24 @@ static long wb_writeback(struct bdi_writeback *wb,
spin_lock(&wb->list_lock);
- /*
- * Kupdate and background works are special and we want to
- * include all inodes that need writing. Livelock avoidance is
- * handled by these works yielding to any other work so we are
- * safe.
- */
- if (work->for_kupdate) {
- dirtied_before = jiffies -
- msecs_to_jiffies(dirty_expire_interval * 10);
- } else if (work->for_background)
- dirtied_before = jiffies;
-
trace_writeback_start(wb, work);
- if (list_empty(&wb->b_io))
+ if (list_empty(&wb->b_io)) {
+ /*
+ * Kupdate and background works are special and we want
+ * to include all inodes that need writing. Livelock
+ * avoidance is handled by these works yielding to any
+ * other work so we are safe.
+ */
+ if (work->for_kupdate) {
+ dirtied_before = jiffies -
+ msecs_to_jiffies(dirty_expire_interval *
+ 10);
+ } else if (work->for_background)
+ dirtied_before = jiffies;
+
queue_io(wb, work, dirtied_before);
+ queued = true;
+ }
if (work->sb)
progress = writeback_sb_inodes(work->sb, wb, work);
else
@@ -2127,7 +2137,7 @@ static long wb_writeback(struct bdi_writeback *wb,
* mean the overall work is done. So we keep looping as long
* as made some progress on cleaning pages or inodes.
*/
- if (progress) {
+ if (progress || !queued) {
spin_unlock(&wb->list_lock);
continue;
}
@@ -2262,7 +2272,7 @@ static long wb_do_writeback(struct bdi_writeback *wb)
while ((work = get_next_work_item(wb)) != NULL) {
trace_writeback_exec(wb, work);
wrote += wb_writeback(wb, work);
- finish_writeback_work(wb, work);
+ finish_writeback_work(work);
}
/*
@@ -2322,8 +2332,7 @@ void wb_workfn(struct work_struct *work)
}
/*
- * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
- * write back the whole world.
+ * Start writeback of all dirty pages on this bdi.
*/
static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
enum wb_reason reason)
@@ -2726,7 +2735,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
*/
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
- return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
+ writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);
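Two things are easy to miss in the writeback changes above. First, the kupdate cutoff formerly computed in wb_writeback() is now also needed per-inode in requeue_inode(), so writeback_sb_inodes() derives it the same way; dirty_expire_interval is kept in centiseconds, hence the * 10 before msecs_to_jiffies(). Second, the cutoff test is the usual wraparound-safe jiffies comparison. A small userspace sketch of that test, with time_after() re-derived and the values made up:

#include <stdio.h>

/* same signed-difference trick as the kernel's time_after() */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = 1000;
	unsigned long expire = 300;		/* stand-in for the kupdate interval */
	unsigned long dirtied_before = jiffies - expire;

	unsigned long old_inode = 600;		/* dirtied long ago */
	unsigned long new_inode = 900;		/* dirtied recently */

	/* inode_dirtied_after(inode, t) is time_after(dirtied_when, t) */
	printf("old expired: %d\n", !time_after(old_inode, dirtied_before));
	printf("new expired: %d\n", !time_after(new_inode, dirtied_before));
	return 0;
}

With the patch, an inode that has used up its write slice is only requeued for another immediate pass when it has also expired; a recently-dirtied inode is moved back to the dirty list instead.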
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index b6cad106c37e..0b2da7b7e2ad 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -310,6 +310,10 @@ struct cuse_init_args {
/**
* cuse_process_init_reply - finish initializing CUSE channel
*
+ * @fm: The fuse mount information containing the CUSE connection.
+ * @args: The arguments passed to the init reply.
+ * @error: The error code signifying if any error occurred during the process.
+ *
* This function creates the character device and sets up all the
* required data structures for it. Please read the comment at the
* top of this file for a high-level overview.
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 4a6df591add6..2b0d4781f394 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1321,6 +1321,7 @@ retry:
err = fuse_do_statx(inode, file, stat);
if (err == -ENOSYS) {
fc->no_statx = 1;
+ err = 0;
goto retry;
}
} else {
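The added err = 0 matters because the retry taken after setting fc->no_statx lands in the non-statx branch, which does not always reassign err; without the reset, a path that skips the fallback getattr returns the stale -ENOSYS from the probe. The bug class in miniature (hypothetical names, userspace):

#include <errno.h>
#include <stdio.h>

struct ctx { int use_statx; int need_refresh; };

static int do_statx(struct ctx *c) { return -ENOSYS; }	/* probe fails */
static int do_getattr(struct ctx *c) { return 0; }

static int get_attr(struct ctx *c)
{
	int err = 0;
retry:
	if (c->use_statx) {
		err = do_statx(c);
		if (err == -ENOSYS) {
			c->use_statx = 0;
			err = 0;	/* the fix: drop the probe error */
			goto retry;
		}
	} else if (c->need_refresh) {
		err = do_getattr(c);
	}
	return err;	/* without the reset, stale -ENOSYS leaks here */
}

int main(void)
{
	struct ctx c = { .use_statx = 1, .need_refresh = 0 };

	printf("%d\n", get_attr(&c));	/* 0 with the fix, -38 without */
	return 0;
}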
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a56e7bffd000..b57ce4157640 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1362,7 +1362,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
bool *exclusive)
{
struct inode *inode = file_inode(iocb->ki_filp);
- struct fuse_file *ff = iocb->ki_filp->private_data;
+ struct fuse_inode *fi = get_fuse_inode(inode);
*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
if (*exclusive) {
@@ -1377,7 +1377,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
* have raced, so check it again.
*/
if (fuse_io_past_eof(iocb, from) ||
- fuse_file_uncached_io_start(inode, ff, NULL) != 0) {
+ fuse_inode_uncached_io_start(fi, NULL) != 0) {
inode_unlock_shared(inode);
inode_lock(inode);
*exclusive = true;
@@ -1388,13 +1388,13 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
{
struct inode *inode = file_inode(iocb->ki_filp);
- struct fuse_file *ff = iocb->ki_filp->private_data;
+ struct fuse_inode *fi = get_fuse_inode(inode);
if (exclusive) {
inode_unlock(inode);
} else {
/* Allow opens in caching mode after last parallel dio end */
- fuse_file_uncached_io_end(inode, ff);
+ fuse_inode_uncached_io_end(fi);
inode_unlock_shared(inode);
}
}
@@ -2574,8 +2574,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
* First mmap of direct_io file enters caching inode io mode.
* Also waits for parallel dio writers to go into serial mode
* (exclusive instead of shared lock).
+ * After first mmap, the inode stays in caching io mode until
+ * the direct_io file is released.
*/
- rc = fuse_file_cached_io_start(inode, ff);
+ rc = fuse_file_cached_io_open(inode, ff);
if (rc)
return rc;
}
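Note that fuse_dio_lock() keeps its optimistic-upgrade structure through the renames: decide cheaply whether the exclusive lock is needed, take the shared lock otherwise, then re-test under the shared lock and upgrade if the unlocked check raced. A sketch of the pattern against the generic inode lock API (my_* names are illustrative, not fuse code):

#include <linux/fs.h>

static bool my_needs_exclusive(struct inode *inode, loff_t end)
{
	return end > i_size_read(inode);	/* e.g. a write past EOF */
}

static void my_dio_lock(struct inode *inode, loff_t end, bool *exclusive)
{
	*exclusive = my_needs_exclusive(inode, end);
	if (*exclusive) {
		inode_lock(inode);
		return;
	}
	inode_lock_shared(inode);
	/* the unlocked check may have raced; re-test under the lock */
	if (my_needs_exclusive(inode, end)) {
		inode_unlock_shared(inode);
		inode_lock(inode);
		*exclusive = true;
	}
}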
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index b24084b60864..f23919610313 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -1394,9 +1394,10 @@ int fuse_fileattr_set(struct mnt_idmap *idmap,
struct dentry *dentry, struct fileattr *fa);
/* iomode.c */
-int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff);
-int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb);
-void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff);
+int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff);
+int fuse_inode_uncached_io_start(struct fuse_inode *fi,
+ struct fuse_backing *fb);
+void fuse_inode_uncached_io_end(struct fuse_inode *fi);
int fuse_file_io_open(struct file *file, struct inode *inode);
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 3a5d88878335..99e44ea7d875 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -175,6 +175,7 @@ static void fuse_evict_inode(struct inode *inode)
}
}
if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
+ WARN_ON(fi->iocachectr != 0);
WARN_ON(!list_empty(&fi->write_files));
WARN_ON(!list_empty(&fi->queued_writes));
}
diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c
index c653ddcf0578..c99e285f3183 100644
--- a/fs/fuse/iomode.c
+++ b/fs/fuse/iomode.c
@@ -21,12 +21,13 @@ static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi)
}
/*
- * Start cached io mode.
+ * Called on cached file open() and on first mmap() of direct_io file.
+ * Takes cached_io inode mode reference to be dropped on file release.
*
* Blocks new parallel dio writes and waits for the in-progress parallel dio
* writes to complete.
*/
-int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
+int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff)
{
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -67,10 +68,9 @@ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff)
return 0;
}
-static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
+static void fuse_file_cached_io_release(struct fuse_file *ff,
+ struct fuse_inode *fi)
{
- struct fuse_inode *fi = get_fuse_inode(inode);
-
spin_lock(&fi->lock);
WARN_ON(fi->iocachectr <= 0);
WARN_ON(ff->iomode != IOM_CACHED);
@@ -82,16 +82,15 @@ static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
}
/* Start strictly uncached io mode where cache access is not allowed */
-int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struct fuse_backing *fb)
+int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb)
{
- struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_backing *oldfb;
int err = 0;
spin_lock(&fi->lock);
/* deny conflicting backing files on same fuse inode */
oldfb = fuse_inode_backing(fi);
- if (oldfb && oldfb != fb) {
+ if (fb && oldfb && oldfb != fb) {
err = -EBUSY;
goto unlock;
}
@@ -99,12 +98,10 @@ int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff, struc
err = -ETXTBSY;
goto unlock;
}
- WARN_ON(ff->iomode != IOM_NONE);
fi->iocachectr--;
- ff->iomode = IOM_UNCACHED;
/* fuse inode holds a single refcount of backing file */
- if (!oldfb) {
+ if (fb && !oldfb) {
oldfb = fuse_inode_backing_set(fi, fb);
WARN_ON_ONCE(oldfb != NULL);
} else {
@@ -115,15 +112,29 @@ unlock:
return err;
}
-void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
+/* Takes uncached_io inode mode reference to be dropped on file release */
+static int fuse_file_uncached_io_open(struct inode *inode,
+ struct fuse_file *ff,
+ struct fuse_backing *fb)
{
struct fuse_inode *fi = get_fuse_inode(inode);
+ int err;
+
+ err = fuse_inode_uncached_io_start(fi, fb);
+ if (err)
+ return err;
+
+ WARN_ON(ff->iomode != IOM_NONE);
+ ff->iomode = IOM_UNCACHED;
+ return 0;
+}
+
+void fuse_inode_uncached_io_end(struct fuse_inode *fi)
+{
struct fuse_backing *oldfb = NULL;
spin_lock(&fi->lock);
WARN_ON(fi->iocachectr >= 0);
- WARN_ON(ff->iomode != IOM_UNCACHED);
- ff->iomode = IOM_NONE;
fi->iocachectr++;
if (!fi->iocachectr) {
wake_up(&fi->direct_io_waitq);
@@ -134,6 +145,15 @@ void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
fuse_backing_put(oldfb);
}
+/* Drop uncached_io reference from passthrough open */
+static void fuse_file_uncached_io_release(struct fuse_file *ff,
+ struct fuse_inode *fi)
+{
+ WARN_ON(ff->iomode != IOM_UNCACHED);
+ ff->iomode = IOM_NONE;
+ fuse_inode_uncached_io_end(fi);
+}
+
/*
* Open flags that are allowed in combination with FOPEN_PASSTHROUGH.
* A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write
@@ -163,7 +183,7 @@ static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
return PTR_ERR(fb);
/* First passthrough file open denies caching inode io mode */
- err = fuse_file_uncached_io_start(inode, ff, fb);
+ err = fuse_file_uncached_io_open(inode, ff, fb);
if (!err)
return 0;
@@ -216,7 +236,7 @@ int fuse_file_io_open(struct file *file, struct inode *inode)
if (ff->open_flags & FOPEN_PASSTHROUGH)
err = fuse_file_passthrough_open(inode, file);
else
- err = fuse_file_cached_io_start(inode, ff);
+ err = fuse_file_cached_io_open(inode, ff);
if (err)
goto fail;
@@ -236,8 +256,10 @@ fail:
/* No more pending io and no new io possible to inode via open/mmapped file */
void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
{
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
/*
- * Last parallel dio close allows caching inode io mode.
+ * Last passthrough file close allows caching inode io mode.
* Last caching file close exits caching inode io mode.
*/
switch (ff->iomode) {
@@ -245,10 +267,10 @@ void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
/* Nothing to do */
break;
case IOM_UNCACHED:
- fuse_file_uncached_io_end(inode, ff);
+ fuse_file_uncached_io_release(ff, fi);
break;
case IOM_CACHED:
- fuse_file_cached_io_end(inode, ff);
+ fuse_file_cached_io_release(ff, fi);
break;
}
}
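The iomode refactoring above separates per-file state (ff->iomode) from per-inode state (fi->iocachectr), which is what allows the dio path to take an uncached reference with no fuse_file at hand. The counter convention, as this patch uses it: positive values count cached-mode references, negative values count uncached (parallel dio / passthrough) references, and the two modes exclude each other. A userspace model of that invariant:

#include <stdio.h>

static int iocachectr;	/* models fi->iocachectr */

static int cached_start(void)
{
	if (iocachectr < 0)
		return -1;	/* the kernel instead waits for dio to drain */
	iocachectr++;
	return 0;
}

static int uncached_start(void)
{
	if (iocachectr > 0)
		return -1;	/* -ETXTBSY in fuse_inode_uncached_io_start() */
	iocachectr--;
	return 0;
}

int main(void)
{
	printf("cached open: %d (ctr=%d)\n", cached_start(), iocachectr);
	printf("dio open:    %d (ctr=%d)\n", uncached_start(), iocachectr);
	return 0;
}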
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 1567f0323858..9666d13884ce 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -225,7 +225,7 @@ int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
goto out;
res = -EINVAL;
- if (map->flags)
+ if (map->flags || map->padding)
goto out;
file = fget(map->fd);
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 322af827a232..bb3e941b9503 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -170,7 +170,7 @@ static ssize_t tag_show(struct kobject *kobj,
{
struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj);
- return sysfs_emit(buf, fs->tag);
+ return sysfs_emit(buf, "%s\n", fs->tag);
}
static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag);
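The virtio_fs change is a classic format-string fix (plus the trailing newline sysfs values are expected to carry): a device-supplied tag must never be used as the format itself. The bug class, illustrated in userspace:

#include <stdio.h>

int main(void)
{
	const char *tag = "100%special";	/* externally controlled */

	/*
	 * printf(tag) would be undefined behaviour: the "%s" hiding in
	 * the tag consumes a nonexistent argument. That is exactly the
	 * sysfs_emit(buf, fs->tag) call being fixed above.
	 */
	printf("%s\n", tag);	/* correct: the tag prints verbatim */
	return 0;
}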
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 974aca9c8ea8..10d5acd3f742 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -116,8 +116,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
* @folio: The folio to write
* @wbc: The writeback control
*
- * This is shared between writepage and writepages and implements the
- * core of the writepage operation. If a transaction is required then
+ * Implements the core of write back. If a transaction is required then
* the checked flag will have been set and the transaction will have
* already been started before this is called.
*/
@@ -755,6 +754,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
.readahead = gfs2_readahead,
.dirty_folio = jdata_dirty_folio,
.bmap = gfs2_bmap,
+ .migrate_folio = buffer_migrate_folio,
.invalidate_folio = gfs2_invalidate_folio,
.release_folio = gfs2_release_folio,
.is_partially_uptodate = block_is_partially_uptodate,
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 789af5c8fade..1795c4e8dbf6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1718,7 +1718,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
struct buffer_head *dibh, *bh;
struct gfs2_holder rd_gh;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
- u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
+ unsigned int bsize = 1 << bsize_shift;
+ u64 lblock = (offset + bsize - 1) >> bsize_shift;
__u16 start_list[GFS2_MAX_META_HEIGHT];
__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
unsigned int start_aligned, end_aligned;
@@ -1729,7 +1730,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
u64 prev_bnr = 0;
__be64 *start, *end;
- if (offset >= maxsize) {
+ if (offset + bsize - 1 >= maxsize) {
/*
* The starting point lies beyond the allocated metadata;
* there are no blocks to deallocate.
@@ -1826,7 +1827,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
gfs2_assert_withdraw(sdp, bh);
if (gfs2_assert_withdraw(sdp,
prev_bnr != bh->b_blocknr)) {
- fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u,"
+ fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
"s_h:%u, mp_h:%u\n",
(unsigned long long)ip->i_no_addr,
prev_bnr, ip->i_height, strip_h, mp_h);
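In punch_hole(), lblock is the first block lying wholly inside the hole, i.e. offset rounded up to a block boundary. The widened early-exit test makes the bounds check agree with that rounding: an offset inside the final allocated block used to slip past offset >= maxsize even though no whole block remained to deallocate. A quick userspace check with 4 KiB blocks:

#include <stdio.h>

int main(void)
{
	unsigned int bsize_shift = 12;			/* 4 KiB blocks */
	unsigned long long bsize = 1ULL << bsize_shift;
	unsigned long long maxsize = 2 * bsize;		/* two blocks allocated */
	unsigned long long offset = 8000;		/* inside the last block */

	unsigned long long lblock = (offset + bsize - 1) >> bsize_shift;

	printf("lblock = %llu (first block past the allocation)\n", lblock);
	printf("old test exits early: %d\n", offset >= maxsize);
	printf("new test exits early: %d\n", offset + bsize - 1 >= maxsize);
	return 0;
}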
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 560e4624c09f..dbf1aede744c 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -562,15 +562,18 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
int ret = 0;
ret = gfs2_dirent_offset(GFS2_SB(inode), buf);
- if (ret < 0)
- goto consist_inode;
-
+ if (ret < 0) {
+ gfs2_consist_inode(GFS2_I(inode));
+ return ERR_PTR(-EIO);
+ }
offset = ret;
prev = NULL;
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
- if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1))
- goto consist_inode;
+ if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1)) {
+ gfs2_consist_inode(GFS2_I(inode));
+ return ERR_PTR(-EIO);
+ }
do {
ret = scan(dent, name, opaque);
if (ret)
@@ -582,8 +585,10 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
dent = buf + offset;
size = be16_to_cpu(dent->de_rec_len);
if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size,
- len, 0))
- goto consist_inode;
+ len, 0)) {
+ gfs2_consist_inode(GFS2_I(inode));
+ return ERR_PTR(-EIO);
+ }
} while(1);
switch(ret) {
@@ -597,10 +602,6 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
BUG_ON(ret > 0);
return ERR_PTR(ret);
}
-
-consist_inode:
- gfs2_consist_inode(GFS2_I(inode));
- return ERR_PTR(-EIO);
}
static int dirent_check_reclen(struct gfs2_inode *dip,
@@ -609,14 +610,16 @@ static int dirent_check_reclen(struct gfs2_inode *dip,
const void *ptr = d;
u16 rec_len = be16_to_cpu(d->de_rec_len);
- if (unlikely(rec_len < sizeof(struct gfs2_dirent)))
- goto broken;
+ if (unlikely(rec_len < sizeof(struct gfs2_dirent))) {
+ gfs2_consist_inode(dip);
+ return -EIO;
+ }
ptr += rec_len;
if (ptr < end_p)
return rec_len;
if (ptr == end_p)
return -ENOENT;
-broken:
+
gfs2_consist_inode(dip);
return -EIO;
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4c42ada60ae7..08982937b5df 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -376,23 +376,23 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
}
/**
- * gfs2_allocate_page_backing - Allocate blocks for a write fault
- * @page: The (locked) page to allocate backing for
+ * gfs2_allocate_folio_backing - Allocate blocks for a write fault
+ * @folio: The (locked) folio to allocate backing for
* @length: Size of the allocation
*
- * We try to allocate all the blocks required for the page in one go. This
+ * We try to allocate all the blocks required for the folio in one go. This
* might fail for various reasons, so we keep trying until all the blocks to
- * back this page are allocated. If some of the blocks are already allocated,
+ * back this folio are allocated. If some of the blocks are already allocated,
* that is ok too.
*/
-static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
+static int gfs2_allocate_folio_backing(struct folio *folio, size_t length)
{
- u64 pos = page_offset(page);
+ u64 pos = folio_pos(folio);
do {
struct iomap iomap = { };
- if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
+ if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap))
return -EIO;
if (length < iomap.length)
@@ -414,16 +414,16 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_alloc_parms ap = {};
- u64 offset = page_offset(page);
+ u64 pos = folio_pos(folio);
unsigned int data_blocks, ind_blocks, rblocks;
vm_fault_t ret = VM_FAULT_LOCKED;
struct gfs2_holder gh;
- unsigned int length;
+ size_t length;
loff_t size;
int err;
@@ -436,23 +436,23 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
goto out_uninit;
}
- /* Check page index against inode size */
+ /* Check folio index against inode size */
size = i_size_read(inode);
- if (offset >= size) {
+ if (pos >= size) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}
- /* Update file times before taking page lock */
+ /* Update file times before taking folio lock */
file_update_time(vmf->vma->vm_file);
- /* page is wholly or partially inside EOF */
- if (size - offset < PAGE_SIZE)
- length = size - offset;
+ /* folio is wholly or partially inside EOF */
+ if (size - pos < folio_size(folio))
+ length = size - pos;
else
- length = PAGE_SIZE;
+ length = folio_size(folio);
- gfs2_size_hint(vmf->vma->vm_file, offset, length);
+ gfs2_size_hint(vmf->vma->vm_file, pos, length);
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -463,11 +463,12 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
*/
if (!gfs2_is_stuffed(ip) &&
- !gfs2_write_alloc_required(ip, offset, length)) {
- lock_page(page);
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+ !gfs2_write_alloc_required(ip, pos, length)) {
+ folio_lock(folio);
+ if (!folio_test_uptodate(folio) ||
+ folio->mapping != inode->i_mapping) {
ret = VM_FAULT_NOPAGE;
- unlock_page(page);
+ folio_unlock(folio);
}
goto out_unlock;
}
@@ -504,7 +505,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
goto out_trans_fail;
}
- /* Unstuff, if required, and allocate backing blocks for page */
+ /* Unstuff, if required, and allocate backing blocks for folio */
if (gfs2_is_stuffed(ip)) {
err = gfs2_unstuff_dinode(ip);
if (err) {
@@ -513,22 +514,22 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
}
}
- lock_page(page);
+ folio_lock(folio);
/* If truncated, we must retry the operation, we may have raced
* with the glock demotion code.
*/
- if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+ if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
ret = VM_FAULT_NOPAGE;
goto out_page_locked;
}
- err = gfs2_allocate_page_backing(page, length);
+ err = gfs2_allocate_folio_backing(folio, length);
if (err)
ret = vmf_fs_error(err);
out_page_locked:
if (ret != VM_FAULT_LOCKED)
- unlock_page(page);
+ folio_unlock(folio);
out_trans_end:
gfs2_trans_end(sdp);
out_trans_fail:
@@ -540,8 +541,8 @@ out_unlock:
out_uninit:
gfs2_holder_uninit(&gh);
if (ret == VM_FAULT_LOCKED) {
- set_page_dirty(page);
- wait_for_stable_page(page);
+ folio_mark_dirty(folio);
+ folio_wait_stable(folio);
}
sb_end_pagefault(inode->i_sb);
return ret;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 34540f9d011c..9f11fc1e79eb 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -166,19 +166,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
return true;
}
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void __gfs2_glock_free(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb();
wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl) {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ __gfs2_glock_free(gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_kill_wait);
+}
+
+void gfs2_glock_free_later(struct gfs2_glock *gl) {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ spin_lock(&lru_lock);
+ list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
+ spin_unlock(&lru_lock);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_kill_wait);
}
+static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
+{
+ struct list_head *list = &sdp->sd_dead_glocks;
+
+ while(!list_empty(list)) {
+ struct gfs2_glock *gl;
+
+ gl = list_first_entry(list, struct gfs2_glock, gl_lru);
+ list_del_init(&gl->gl_lru);
+ __gfs2_glock_free(gl);
+ }
+}
+
/**
* gfs2_glock_hold() - increment reference count on glock
* @gl: The glock to hold
@@ -248,7 +274,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
* Enqueue the glock on the work queue. Passes one glock reference on to the
* work queue.
*/
-static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
/*
* We are holding the lockref spinlock, and the work was still
@@ -261,12 +287,6 @@ static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
}
}
-static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
- spin_lock(&gl->gl_lockref.lock);
- __gfs2_glock_queue_work(gl, delay);
- spin_unlock(&gl->gl_lockref.lock);
-}
-
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -285,14 +305,6 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
-/*
- * Cause the glock to be put in work queue context.
- */
-void gfs2_glock_queue_put(struct gfs2_glock *gl)
-{
- gfs2_glock_queue_work(gl, 0);
-}
-
/**
* gfs2_glock_put() - Decrement reference count on glock
* @gl: The glock to put
@@ -307,6 +319,23 @@ void gfs2_glock_put(struct gfs2_glock *gl)
__gfs2_glock_put(gl);
}
+/*
+ * gfs2_glock_put_async - Decrement reference count without sleeping
+ * @gl: The glock to put
+ *
+ * Decrement the reference count on the glock immediately unless it is the
+ * last reference. Defer putting the last reference to work queue context.
+ */
+void gfs2_glock_put_async(struct gfs2_glock *gl)
+{
+ if (lockref_put_or_lock(&gl->gl_lockref))
+ return;
+
+ GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+ gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
+}
+
/**
* may_grant - check if it's ok to grant a new lock
* @gl: The glock
@@ -591,7 +620,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK;
- spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state);
state_change(gl, state);
gh = find_first_waiter(gl);
@@ -639,7 +667,6 @@ retry:
gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
- spin_unlock(&gl->gl_lockref.lock);
return;
}
@@ -662,7 +689,6 @@ retry:
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_lockref.lock);
}
static bool is_system_glock(struct gfs2_glock *gl)
@@ -690,6 +716,7 @@ __acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret;
@@ -718,6 +745,9 @@ __acquires(&gl->gl_lockref.lock)
(gl->gl_state == LM_ST_EXCLUSIVE) ||
(lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
clear_bit(GLF_BLOCKING, &gl->gl_flags);
+ if (!glops->go_inval && !glops->go_sync)
+ goto skip_inval;
+
spin_unlock(&gl->gl_lockref.lock);
if (glops->go_sync) {
ret = glops->go_sync(gl);
@@ -730,6 +760,7 @@ __acquires(&gl->gl_lockref.lock)
fs_err(sdp, "Error %d syncing glock \n", ret);
gfs2_dump_glock(NULL, gl, true);
}
+ spin_lock(&gl->gl_lockref.lock);
goto skip_inval;
}
}
@@ -750,9 +781,10 @@ __acquires(&gl->gl_lockref.lock)
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
+ spin_lock(&gl->gl_lockref.lock);
skip_inval:
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
/*
* Check for an error encountered since we called go_sync and go_inval.
* If so, we can't withdraw from the glock code because the withdraw
@@ -795,30 +827,36 @@ skip_inval:
clear_bit(GLF_LOCK, &gl->gl_flags);
clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
- goto out;
+ return;
} else {
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
}
- if (sdp->sd_lockstruct.ls_ops->lm_lock) {
- /* lock_dlm */
- ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
+ if (ls->ls_ops->lm_lock) {
+ spin_unlock(&gl->gl_lockref.lock);
+ ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+ spin_lock(&gl->gl_lockref.lock);
+
if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
target == LM_ST_UNLOCKED &&
- test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
- finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
+ test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ /*
+ * The lockspace has been released and the lock has
+ * been unlocked implicitly.
+ */
} else if (ret) {
fs_err(sdp, "lm_lock ret %d\n", ret);
- GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
+ target = gl->gl_state | LM_OUT_ERROR;
+ } else {
+ /* The operation will be completed asynchronously. */
+ return;
}
- } else { /* lock_nolock */
- finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
}
-out:
- spin_lock(&gl->gl_lockref.lock);
+
+ /* Complete the operation now. */
+ finish_xmote(gl, target);
+ gfs2_glock_queue_work(gl, 0);
}
/**
@@ -834,8 +872,9 @@ __acquires(&gl->gl_lockref.lock)
{
struct gfs2_holder *gh = NULL;
- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+ if (test_bit(GLF_LOCK, &gl->gl_flags))
return;
+ set_bit(GLF_LOCK, &gl->gl_flags);
GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
@@ -865,7 +904,7 @@ out_sched:
clear_bit(GLF_LOCK, &gl->gl_flags);
smp_mb__after_atomic();
gl->gl_lockref.count++;
- __gfs2_glock_queue_work(gl, 0);
+ gfs2_glock_queue_work(gl, 0);
return;
out_unlock:
@@ -1071,11 +1110,12 @@ static void glock_work_func(struct work_struct *work)
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
unsigned int drop_refs = 1;
- if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ spin_lock(&gl->gl_lockref.lock);
+ if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
- spin_lock(&gl->gl_lockref.lock);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -1096,12 +1136,12 @@ static void glock_work_func(struct work_struct *work)
drop_refs--;
if (gl->gl_name.ln_type != LM_TYPE_INODE)
delay = 0;
- __gfs2_glock_queue_work(gl, delay);
+ gfs2_glock_queue_work(gl, delay);
}
/*
* Drop the remaining glock references manually here. (Mind that
- * __gfs2_glock_queue_work depends on the lockref spinlock begin held
+ * gfs2_glock_queue_work depends on the lockref spinlock being held
* here as well.)
*/
gl->gl_lockref.count -= drop_refs;
@@ -1606,7 +1646,7 @@ unlock:
test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gl->gl_lockref.count++;
- __gfs2_glock_queue_work(gl, 0);
+ gfs2_glock_queue_work(gl, 0);
}
run_queue(gl, 1);
spin_unlock(&gl->gl_lockref.lock);
@@ -1672,7 +1712,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
!test_bit(GLF_DEMOTE, &gl->gl_flags) &&
gl->gl_name.ln_type == LM_TYPE_INODE)
delay = gl->gl_hold_time;
- __gfs2_glock_queue_work(gl, delay);
+ gfs2_glock_queue_work(gl, delay);
}
}
@@ -1896,7 +1936,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
delay = gl->gl_hold_time;
}
handle_callback(gl, state, delay, true);
- __gfs2_glock_queue_work(gl, delay);
+ gfs2_glock_queue_work(gl, delay);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -1956,7 +1996,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
gl->gl_lockref.count++;
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- __gfs2_glock_queue_work(gl, 0);
+ gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
}
@@ -1976,6 +2016,14 @@ static int glock_cmp(void *priv, const struct list_head *a,
return 0;
}
+static bool can_free_glock(struct gfs2_glock *gl)
+{
+ bool held = gl->gl_state != LM_ST_UNLOCKED;
+
+ return !test_bit(GLF_LOCK, &gl->gl_flags) &&
+ gl->gl_lockref.count == held;
+}
+
/**
* gfs2_dispose_glock_lru - Demote a list of glocks
* @list: The list to dispose of
@@ -1990,37 +2038,38 @@ static int glock_cmp(void *priv, const struct list_head *a,
* private)
*/
-static void gfs2_dispose_glock_lru(struct list_head *list)
+static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
struct gfs2_glock *gl;
+ unsigned long freed = 0;
list_sort(NULL, list, glock_cmp);
while(!list_empty(list)) {
gl = list_first_entry(list, struct gfs2_glock, gl_lru);
- list_del_init(&gl->gl_lru);
- clear_bit(GLF_LRU, &gl->gl_flags);
if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
- list_add(&gl->gl_lru, &lru_list);
- set_bit(GLF_LRU, &gl->gl_flags);
- atomic_inc(&lru_count);
+ list_move(&gl->gl_lru, &lru_list);
continue;
}
- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+ if (!can_free_glock(gl)) {
spin_unlock(&gl->gl_lockref.lock);
goto add_back_to_lru;
}
+ list_del_init(&gl->gl_lru);
+ atomic_dec(&lru_count);
+ clear_bit(GLF_LRU, &gl->gl_flags);
+ freed++;
gl->gl_lockref.count++;
if (demote_ok(gl))
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
- WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
- __gfs2_glock_queue_work(gl, 0);
+ gfs2_glock_queue_work(gl, 0);
spin_unlock(&gl->gl_lockref.lock);
cond_resched_lock(&lru_lock);
}
+ return freed;
}
/**
@@ -2032,32 +2081,21 @@ add_back_to_lru:
* gfs2_dispose_glock_lru() above.
*/
-static long gfs2_scan_glock_lru(int nr)
+static unsigned long gfs2_scan_glock_lru(unsigned long nr)
{
struct gfs2_glock *gl, *next;
LIST_HEAD(dispose);
- long freed = 0;
+ unsigned long freed = 0;
spin_lock(&lru_lock);
list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
- if (nr-- <= 0)
+ if (!nr--)
break;
- /* Test for being demotable */
- if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
- if (!spin_trylock(&gl->gl_lockref.lock))
- continue;
- if (gl->gl_lockref.count <= 1 &&
- (gl->gl_state == LM_ST_UNLOCKED ||
- demote_ok(gl))) {
- list_move(&gl->gl_lru, &dispose);
- atomic_dec(&lru_count);
- freed++;
- }
- spin_unlock(&gl->gl_lockref.lock);
- }
+ if (can_free_glock(gl))
+ list_move(&gl->gl_lru, &dispose);
}
if (!list_empty(&dispose))
- gfs2_dispose_glock_lru(&dispose);
+ freed = gfs2_dispose_glock_lru(&dispose);
spin_unlock(&lru_lock);
return freed;
@@ -2148,8 +2186,11 @@ static void thaw_glock(struct gfs2_glock *gl)
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
+
+ spin_lock(&gl->gl_lockref.lock);
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
}
/**
@@ -2167,7 +2208,7 @@ static void clear_glock(struct gfs2_glock *gl)
gl->gl_lockref.count++;
if (gl->gl_state != LM_ST_UNLOCKED)
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
- __gfs2_glock_queue_work(gl, 0);
+ gfs2_glock_queue_work(gl, 0);
}
spin_unlock(&gl->gl_lockref.lock);
}
@@ -2225,6 +2266,8 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
wait_event_timeout(sdp->sd_kill_wait,
atomic_read(&sdp->sd_glock_disposal) == 0,
HZ * 600);
+ gfs2_lm_unmount(sdp);
+ gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp);
}
@@ -2529,8 +2572,7 @@ static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
if (gl) {
if (n == 0)
return;
- if (!lockref_put_not_zero(&gl->gl_lockref))
- gfs2_glock_queue_put(gl);
+ gfs2_glock_put_async(gl);
}
for (;;) {
gl = rhashtable_walk_next(&gi->hti);
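can_free_glock() condenses the old inline eligibility test. As I read the patch, the comparison encodes the convention that a glock still holding a DLM lock (gl_state != LM_ST_UNLOCKED) pins exactly one reference for that hold, so "no other users" means a refcount of 1 when held and 0 when unlocked; the bool promotes to 0 or 1. Modeled in isolation:

#include <stdbool.h>
#include <stdio.h>

static bool can_free(long count, bool unlocked, bool glf_lock)
{
	bool held = !unlocked;

	return !glf_lock && count == held;	/* held promotes to 0 or 1 */
}

int main(void)
{
	printf("%d\n", can_free(0, true, false));	/* 1: idle and unlocked */
	printf("%d\n", can_free(1, false, false));	/* 1: only the hold itself */
	printf("%d\n", can_free(2, false, false));	/* 0: another user exists */
	return 0;
}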
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 0114f3e0ebe0..19aef6d53267 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -172,7 +172,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
int create, struct gfs2_glock **glp);
struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put(struct gfs2_glock *gl);
-void gfs2_glock_queue_put(struct gfs2_glock *gl);
+void gfs2_glock_put_async(struct gfs2_glock *gl);
void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
u16 flags, struct gfs2_holder *gh,
@@ -252,6 +252,7 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
void gfs2_glock_free(struct gfs2_glock *gl);
+void gfs2_glock_free_later(struct gfs2_glock *gl);
int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 45653cbc8a87..68677fb69a73 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -82,6 +82,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
+
+ if (gfs2_withdrawing(sdp))
+ gfs2_withdraw(sdp);
}
@@ -409,10 +412,14 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
struct inode *inode = &ip->i_inode;
bool is_new = inode->i_state & I_NEW;
- if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
- goto corrupt;
- if (unlikely(!is_new && inode_wrong_type(inode, mode)))
- goto corrupt;
+ if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
+ if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
inode->i_mode = mode;
if (is_new) {
@@ -449,26 +456,28 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
gfs2_set_inode_flags(inode);
height = be16_to_cpu(str->di_height);
- if (unlikely(height > sdp->sd_max_height))
- goto corrupt;
+ if (unlikely(height > sdp->sd_max_height)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
ip->i_height = (u8)height;
depth = be16_to_cpu(str->di_depth);
- if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
- goto corrupt;
+ if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
ip->i_depth = (u8)depth;
ip->i_entries = be32_to_cpu(str->di_entries);
- if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
- goto corrupt;
-
+ if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
if (S_ISREG(inode->i_mode))
gfs2_set_aops(inode);
return 0;
-corrupt:
- gfs2_consist_inode(ip);
- return -EIO;
}
/**
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 95a334d64da2..60abd7050c99 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -838,6 +838,7 @@ struct gfs2_sbd {
/* For quiescing the filesystem */
struct gfs2_holder sd_freeze_gh;
struct mutex sd_freeze_mutex;
+ struct list_head sd_dead_glocks;
char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
char sd_table_name[GFS2_FSNAME_LEN];
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index d1ac5d0679ea..49059274a528 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -121,6 +121,11 @@ static void gdlm_ast(void *arg)
struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state;
+ /* If the glock is dead, we only react to a dlm_unlock() reply. */
+ if (__lockref_is_dead(&gl->gl_lockref) &&
+ gl->gl_lksb.sb_status != -DLM_EUNLOCK)
+ return;
+
gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -171,6 +176,9 @@ static void gdlm_bast(void *arg, int mode)
{
struct gfs2_glock *gl = arg;
+ if (__lockref_is_dead(&gl->gl_lockref))
+ return;
+
switch (mode) {
case DLM_LOCK_EX:
gfs2_glock_cb(gl, LM_ST_UNLOCKED);
@@ -291,8 +299,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
- if (gl->gl_lksb.sb_lkid == 0)
- goto out_free;
+ BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
+
+ if (gl->gl_lksb.sb_lkid == 0) {
+ gfs2_glock_free(gl);
+ return;
+ }
clear_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
@@ -300,13 +312,23 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gfs2_update_request_times(gl);
/* don't want to call dlm if we've unmounted the lock protocol */
- if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
- goto out_free;
- /* don't want to skip dlm_unlock writing the lvb when lock has one */
+ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ gfs2_glock_free(gl);
+ return;
+ }
+
+ /*
+ * When the lockspace is released, all remaining glocks will be
+ * unlocked automatically. This is more efficient than unlocking them
+ * individually, but when the lock is held in DLM_LOCK_EX or
+ * DLM_LOCK_PW mode, the lock value block (LVB) will be lost.
+ */
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
- !gl->gl_lksb.sb_lvbptr)
- goto out_free;
+ (!gl->gl_lksb.sb_lvbptr || gl->gl_state != LM_ST_EXCLUSIVE)) {
+ gfs2_glock_free_later(gl);
+ return;
+ }
again:
error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
@@ -321,10 +343,6 @@ again:
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number, error);
}
- return;
-
-out_free:
- gfs2_glock_free(gl);
}
static void gdlm_cancel(struct gfs2_glock *gl)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 8cddf955ebc0..6ee6013fb825 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -786,7 +786,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
if (atomic_dec_return(&gl->gl_revokes) == 0) {
clear_bit(GLF_LFLUSH, &gl->gl_flags);
- gfs2_glock_queue_put(gl);
+ gfs2_glock_put_async(gl);
}
}
@@ -1108,7 +1108,8 @@ repeat:
lops_before_commit(sdp, tr);
if (gfs2_withdrawing_or_withdrawn(sdp))
goto out_withdraw;
- gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
+ if (sdp->sd_jdesc)
+ gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
if (gfs2_withdrawing_or_withdrawn(sdp))
goto out_withdraw;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index f814054c8cd0..2b26e8d529aa 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -32,14 +32,14 @@
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(page);
struct buffer_head *bh, *head;
int nr_underway = 0;
blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
- BUG_ON(!PageLocked(page));
- BUG_ON(!page_has_buffers(page));
+ BUG_ON(!folio_test_locked(folio));
- head = page_buffers(page);
+ head = folio_buffers(folio);
bh = head;
do {
@@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
continue;
}
if (test_clear_buffer_dirty(bh)) {
@@ -69,8 +69,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
* The page and its buffers are protected by PageWriteback(), so we can
* drop the bh refcounts early.
*/
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
+ BUG_ON(folio_test_writeback(folio));
+ folio_start_writeback(folio);
do {
struct buffer_head *next = bh->b_this_page;
@@ -80,10 +80,10 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
}
bh = next;
} while (bh != head);
- unlock_page(page);
+ folio_unlock(folio);
if (nr_underway == 0)
- end_page_writeback(page);
+ folio_end_writeback(folio);
return 0;
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 572d58e86296..227edbaddfbc 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -136,6 +136,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
atomic_set(&sdp->sd_log_in_flight, 0);
init_waitqueue_head(&sdp->sd_log_flush_wait);
mutex_init(&sdp->sd_freeze_mutex);
+ INIT_LIST_HEAD(&sdp->sd_dead_glocks);
return sdp;
@@ -184,22 +185,10 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
return 0;
}
-static void end_bio_io_page(struct bio *bio)
-{
- struct page *page = bio->bi_private;
-
- if (!bio->bi_status)
- SetPageUptodate(page);
- else
- pr_warn("error %d reading superblock\n", bio->bi_status);
- unlock_page(page);
-}
-
-static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
+static void gfs2_sb_in(struct gfs2_sbd *sdp, const struct gfs2_sb *str)
{
struct gfs2_sb_host *sb = &sdp->sd_sb;
struct super_block *s = sdp->sd_vfs;
- const struct gfs2_sb *str = buf;
sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
@@ -239,34 +228,26 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
struct super_block *sb = sdp->sd_vfs;
- struct gfs2_sb *p;
struct page *page;
- struct bio *bio;
+ struct bio_vec bvec;
+ struct bio bio;
+ int err;
- page = alloc_page(GFP_NOFS);
+ page = alloc_page(GFP_KERNEL);
if (unlikely(!page))
return -ENOMEM;
- ClearPageUptodate(page);
- ClearPageDirty(page);
- lock_page(page);
-
- bio = bio_alloc(sb->s_bdev, 1, REQ_OP_READ | REQ_META, GFP_NOFS);
- bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
- __bio_add_page(bio, page, PAGE_SIZE, 0);
+ bio_init(&bio, sb->s_bdev, &bvec, 1, REQ_OP_READ | REQ_META);
+ bio.bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
+ __bio_add_page(&bio, page, PAGE_SIZE, 0);
- bio->bi_end_io = end_bio_io_page;
- bio->bi_private = page;
- submit_bio(bio);
- wait_on_page_locked(page);
- bio_put(bio);
- if (!PageUptodate(page)) {
+ err = submit_bio_wait(&bio);
+ if (err) {
+ pr_warn("error %d reading superblock\n", err);
__free_page(page);
- return -EIO;
+ return err;
}
- p = kmap(page);
- gfs2_sb_in(sdp, p);
- kunmap(page);
+ gfs2_sb_in(sdp, page_address(page));
__free_page(page);
return gfs2_check_sb(sdp, silent);
}
@@ -1288,7 +1269,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
error = gfs2_make_fs_rw(sdp);
if (error) {
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(sdp);
gfs2_destroy_threads(sdp);
fs_err(sdp, "can't make FS RW: %d\n", error);
goto fail_per_node;
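The gfs2_read_super() rewrite is worth noting as a pattern: for a one-off synchronous read, a one-segment on-stack bio with submit_bio_wait() replaces the custom end_io callback, the page-lock handshake, and the bio_alloc()/bio_put() pair. GFP_KERNEL also becomes safe for the page allocation, since no filesystem is set up yet to recurse into. The reduced pattern, as a hedged sketch (read_sector_sync() is illustrative, not kernel code):

#include <linux/bio.h>

static int read_sector_sync(struct block_device *bdev, sector_t sector,
			    struct page *page)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ | REQ_META);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, PAGE_SIZE, 0);
	return submit_bio_wait(&bio);	/* sleeps until the read completes */
}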
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 26d6c1eea559..29c772816765 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -814,11 +814,11 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
bi = rgd->rd_bits + (length - 1);
if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
gfs2_lm(sdp,
- "ri_addr = %llu\n"
- "ri_length = %u\n"
- "ri_data0 = %llu\n"
- "ri_data = %u\n"
- "ri_bitbytes = %u\n"
+ "ri_addr=%llu "
+ "ri_length=%u "
+ "ri_data0=%llu "
+ "ri_data=%u "
+ "ri_bitbytes=%u "
"start=%u len=%u offset=%u\n",
(unsigned long long)rgd->rd_addr,
rgd->rd_length,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index e5f79466340d..7a5aedfcd52a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -67,9 +67,13 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
sdp->sd_journals = 0;
spin_unlock(&sdp->sd_jindex_spin);
+ down_write(&sdp->sd_log_flush_lock);
sdp->sd_jdesc = NULL;
+ up_write(&sdp->sd_log_flush_lock);
+
while (!list_empty(&list)) {
jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
+ BUG_ON(jd->jd_log_bio);
gfs2_free_journal_extents(jd);
list_del(&jd->jd_list);
iput(jd->jd_inode);
@@ -354,7 +358,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
list_add(&lfcc->list, &list);
}
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(sdp);
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
LM_FLAG_NOEXP | GL_NOPID,
@@ -378,7 +382,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
if (!error)
goto out; /* success */
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(sdp);
relock_shared:
error2 = gfs2_freeze_lock_shared(sdp);
@@ -617,7 +621,7 @@ restart:
/* Release stuff */
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(sdp);
iput(sdp->sd_jindex);
iput(sdp->sd_statfs_inode);
@@ -646,10 +650,7 @@ restart:
gfs2_gl_hash_clear(sdp);
truncate_inode_pages_final(&sdp->sd_aspace);
gfs2_delete_debugfs_file(sdp);
- /* Unmount the locking protocol */
- gfs2_lm_unmount(sdp);
- /* At this point, we're through participating in the lockspace */
gfs2_sys_fs_del(sdp);
free_sbd(sdp);
}
@@ -706,7 +707,7 @@ void gfs2_freeze_func(struct work_struct *work)
if (error)
goto freeze_failed;
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(sdp);
set_bit(SDF_FROZEN, &sdp->sd_flags);
error = gfs2_do_thaw(sdp);
@@ -811,7 +812,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
}
atomic_inc(&sb->s_active);
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(sdp);
error = gfs2_do_thaw(sdp);
@@ -832,7 +833,7 @@ void gfs2_thaw_freeze_initiator(struct super_block *sb)
if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
goto out;
- gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ gfs2_freeze_unlock(sdp);
out:
mutex_unlock(&sdp->sd_freeze_mutex);
@@ -1045,7 +1046,7 @@ static int gfs2_drop_inode(struct inode *inode)
gfs2_glock_hold(gl);
if (!gfs2_queue_try_to_evict(gl))
- gfs2_glock_queue_put(gl);
+ gfs2_glock_put_async(gl);
return 0;
}
@@ -1251,7 +1252,7 @@ out_qs:
static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
if (current->flags & PF_MEMALLOC)
- gfs2_glock_queue_put(gl);
+ gfs2_glock_put_async(gl);
else
gfs2_glock_put(gl);
}
@@ -1261,7 +1262,6 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_holder *gh = &ip->i_iopen_gh;
- long timeout = 5 * HZ;
int error;
gh->gh_flags |= GL_NOCACHE;
@@ -1292,10 +1292,10 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
if (error)
return false;
- timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
+ wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
!test_bit(HIF_WAIT, &gh->gh_iflags) ||
test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
- timeout);
+ 5 * HZ);
if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
gfs2_glock_dq(gh);
return false;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 250f340cb44d..ecc699f8d9fc 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -88,7 +88,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
"Withdraw In Prog: %d\n"
"Remote Withdraw: %d\n"
"Withdraw Recovery: %d\n"
- "Deactivating: %d\n"
+ "Killing: %d\n"
"sd_log_error: %d\n"
"sd_log_flush_lock: %d\n"
"sd_log_num_revoke: %u\n"
@@ -336,7 +336,7 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len
return -EINVAL;
if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
fs_info(sdp, "demote interface used\n");
- rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
+ rv = gfs2_glock_get(sdp, glnum, glops, NO_CREATE, &gl);
if (rv)
return rv;
gfs2_glock_cb(gl, glmode);
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f52141ce9485..af4758d8d894 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -109,10 +109,10 @@ int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
return error;
}
-void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
+void gfs2_freeze_unlock(struct gfs2_sbd *sdp)
{
- if (gfs2_holder_initialized(freeze_gh))
- gfs2_glock_dq_uninit(freeze_gh);
+ if (gfs2_holder_initialized(&sdp->sd_freeze_gh))
+ gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
}
static void signal_our_withdraw(struct gfs2_sbd *sdp)
@@ -255,7 +255,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
gfs2_glock_nq(&sdp->sd_live_gh);
}
- gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */
+ gfs2_glock_put(live_gl); /* drop extra reference we acquired */
clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
/*
@@ -350,7 +350,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
fs_err(sdp, "telling LM to unmount\n");
lm->lm_unmount(sdp);
}
- set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
fs_err(sdp, "File system withdrawn\n");
dump_stack();
clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
@@ -376,8 +375,8 @@ void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
return;
fs_err(sdp,
- "fatal: assertion \"%s\" failed\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: assertion \"%s\" failed - "
+ "function = %s, file = %s, line = %u\n",
assertion, function, file, line);
/*
@@ -407,7 +406,8 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
return;
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW)
- fs_warn(sdp, "warning: assertion \"%s\" failed at function = %s, file = %s, line = %u\n",
+ fs_warn(sdp, "warning: assertion \"%s\" failed - "
+ "function = %s, file = %s, line = %u\n",
assertion, function, file, line);
if (sdp->sd_args.ar_debug)
@@ -416,10 +416,10 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
dump_stack();
if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
- panic("GFS2: fsid=%s: warning: assertion \"%s\" failed\n"
- "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
+ panic("GFS2: fsid=%s: warning: assertion \"%s\" failed - "
+ "function = %s, file = %s, line = %u\n",
sdp->sd_fsname, assertion,
- sdp->sd_fsname, function, file, line);
+ function, file, line);
sdp->sd_last_warning = jiffies;
}
@@ -432,7 +432,8 @@ void gfs2_consist_i(struct gfs2_sbd *sdp, const char *function,
char *file, unsigned int line)
{
gfs2_lm(sdp,
- "fatal: filesystem consistency error - function = %s, file = %s, line = %u\n",
+ "fatal: filesystem consistency error - "
+ "function = %s, file = %s, line = %u\n",
function, file, line);
gfs2_withdraw(sdp);
}
@@ -447,9 +448,9 @@ void gfs2_consist_inode_i(struct gfs2_inode *ip,
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
gfs2_lm(sdp,
- "fatal: filesystem consistency error\n"
- " inode = %llu %llu\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: filesystem consistency error - "
+ "inode = %llu %llu, "
+ "function = %s, file = %s, line = %u\n",
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
function, file, line);
@@ -470,9 +471,9 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
gfs2_lm(sdp,
- "fatal: filesystem consistency error\n"
- " RG = %llu\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: filesystem consistency error - "
+ "RG = %llu, "
+ "function = %s, file = %s, line = %u\n",
(unsigned long long)rgd->rd_addr,
function, file, line);
gfs2_dump_glock(NULL, rgd->rd_gl, 1);
@@ -486,16 +487,16 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
*/
int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *type, const char *function, char *file,
+ const char *function, char *file,
unsigned int line)
{
int me;
gfs2_lm(sdp,
- "fatal: invalid metadata block\n"
- " bh = %llu (%s)\n"
- " function = %s, file = %s, line = %u\n",
- (unsigned long long)bh->b_blocknr, type,
+ "fatal: invalid metadata block - "
+ "bh = %llu (bad magic number), "
+ "function = %s, file = %s, line = %u\n",
+ (unsigned long long)bh->b_blocknr,
function, file, line);
me = gfs2_withdraw(sdp);
return (me) ? -1 : -2;
@@ -514,9 +515,9 @@ int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
int me;
gfs2_lm(sdp,
- "fatal: invalid metadata block\n"
- " bh = %llu (type: exp=%u, found=%u)\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: invalid metadata block - "
+ "bh = %llu (type: exp=%u, found=%u), "
+ "function = %s, file = %s, line = %u\n",
(unsigned long long)bh->b_blocknr, type, t,
function, file, line);
me = gfs2_withdraw(sdp);
@@ -533,8 +534,8 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
unsigned int line)
{
gfs2_lm(sdp,
- "fatal: I/O error\n"
- " function = %s, file = %s, line = %u\n",
+ "fatal: I/O error - "
+ "function = %s, file = %s, line = %u\n",
function, file, line);
return gfs2_withdraw(sdp);
}
@@ -551,9 +552,9 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
if (gfs2_withdrawing_or_withdrawn(sdp))
return;
- fs_err(sdp, "fatal: I/O error\n"
- " block = %llu\n"
- " function = %s, file = %s, line = %u\n",
+ fs_err(sdp, "fatal: I/O error - "
+ "block = %llu, "
+ "function = %s, file = %s, line = %u\n",
(unsigned long long)bh->b_blocknr, function, file, line);
if (withdraw)
gfs2_withdraw(sdp);
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index ba071998461f..27d03b641024 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -92,7 +92,7 @@ gfs2_consist_rgrpd_i((rgd), __func__, __FILE__, __LINE__)
int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
- const char *type, const char *function,
+ const char *function,
char *file, unsigned int line);
static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
@@ -123,7 +123,7 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
u32 magic = be32_to_cpu(mh->mh_magic);
u16 t = be32_to_cpu(mh->mh_type);
if (unlikely(magic != GFS2_MAGIC))
- return gfs2_meta_check_ii(sdp, bh, "magic number", function,
+ return gfs2_meta_check_ii(sdp, bh, function,
file, line);
if (unlikely(t != type))
return gfs2_metatype_check_ii(sdp, bh, type, t, function,
@@ -150,7 +150,7 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
bool verbose);
int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
-void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+void gfs2_freeze_unlock(struct gfs2_sbd *sdp);
#define gfs2_io_error(sdp) \
gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8c96ba6230d1..17ae5070a90e 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -96,30 +96,34 @@ static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
return -EIO;
for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
- if (!GFS2_EA_REC_LEN(ea))
- goto fail;
+ if (!GFS2_EA_REC_LEN(ea)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
- bh->b_data + bh->b_size))
- goto fail;
- if (!gfs2_eatype_valid(sdp, ea->ea_type))
- goto fail;
+ bh->b_data + bh->b_size)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
+ if (!gfs2_eatype_valid(sdp, ea->ea_type)) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
error = ea_call(ip, bh, ea, prev, data);
if (error)
return error;
if (GFS2_EA_IS_LAST(ea)) {
if ((char *)GFS2_EA2NEXT(ea) !=
- bh->b_data + bh->b_size)
- goto fail;
+ bh->b_data + bh->b_size) {
+ gfs2_consist_inode(ip);
+ return -EIO;
+ }
break;
}
}
return error;
-
-fail:
- gfs2_consist_inode(ip);
- return -EIO;
}
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 9c9ff6b8c6f7..5a400259ae74 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -400,21 +400,19 @@ static int name_len(const char *xattr_name, int xattr_name_len)
return len;
}
-static int copy_name(char *buffer, const char *xattr_name, int name_len)
+static ssize_t copy_name(char *buffer, const char *xattr_name, int name_len)
{
- int len = name_len;
- int offset = 0;
+ ssize_t len;
- if (!is_known_namespace(xattr_name)) {
- memcpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
- offset += XATTR_MAC_OSX_PREFIX_LEN;
- len += XATTR_MAC_OSX_PREFIX_LEN;
- }
-
- strncpy(buffer + offset, xattr_name, name_len);
- memset(buffer + offset + name_len, 0, 1);
- len += 1;
+ if (!is_known_namespace(xattr_name))
+ len = scnprintf(buffer, name_len + XATTR_MAC_OSX_PREFIX_LEN,
+ "%s%s", XATTR_MAC_OSX_PREFIX, xattr_name);
+ else
+ len = strscpy(buffer, xattr_name, name_len + 1);
+ /* include NUL-byte in length for non-empty name */
+ if (len >= 0)
+ len++;
return len;
}
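
The rewritten helper reports the copied length including the terminating NUL, counted exactly once: for an unknown-namespace attribute "foo" the caller gets back strlen("osx.foo") + 1 = 8. A userspace model of the new semantics (a sketch: copy_name_model is hypothetical, and snprintf stands in for the kernel's scnprintf/strscpy, which is equivalent as long as the buffer is large enough):

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    #define OSX_PREFIX "osx."	/* XATTR_MAC_OSX_PREFIX in the kernel */

    /* Returns bytes written including the trailing NUL, mirroring the
     * patched kernel helper. */
    static ssize_t copy_name_model(char *buf, size_t bufsize,
				   const char *name, int known_namespace)
    {
	    ssize_t len;

	    if (!known_namespace)
		    len = snprintf(buf, bufsize, "%s%s", OSX_PREFIX, name);
	    else
		    len = snprintf(buf, bufsize, "%s", name);
	    return len >= 0 ? len + 1 : len;	/* count the NUL byte once */
    }

    int main(void)
    {
	    char buf[64];

	    printf("%zd\n", copy_name_model(buf, sizeof(buf), "foo", 0)); /* 8 */
	    return 0;
    }
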
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 6502c7e776d1..412f295acebe 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -40,7 +40,7 @@
#include <linux/sched/mm.h>
static const struct address_space_operations hugetlbfs_aops;
-const struct file_operations hugetlbfs_file_operations;
+static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
@@ -176,14 +176,12 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
- info.flags = 0;
info.length = len;
info.low_limit = current->mm->mmap_base;
info.high_limit = arch_get_mmap_end(addr, len, flags);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
return vm_unmapped_area(&info);
}
@@ -192,14 +190,13 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {};
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
@@ -249,11 +246,11 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
/*
- * Use mm->get_unmapped_area value as a hint to use topdown routine.
+ * Use MMF_TOPDOWN flag as a hint to use topdown routine.
* If architectures have special needs, they should define their own
* version of hugetlb_get_unmapped_area.
*/
- if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
+ if (test_bit(MMF_TOPDOWN, &mm->flags))
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
@@ -1301,13 +1298,14 @@ static void init_once(void *foo)
inode_init_once(&ei->vfs_inode);
}
-const struct file_operations hugetlbfs_file_operations = {
+static const struct file_operations hugetlbfs_file_operations = {
.read_iter = hugetlbfs_read_iter,
.mmap = hugetlbfs_file_mmap,
.fsync = noop_fsync,
.get_unmapped_area = hugetlb_get_unmapped_area,
.llseek = default_llseek,
.fallocate = hugetlbfs_fallocate,
+ .fop_flags = FOP_HUGE_PAGES,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
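
The vm_unmapped_area_info hunks above trade explicit zeroing of individual members for an empty initializer, so fields added to the struct in the future start out zeroed at every call site automatically. The pattern in miniature (= {} is a GNU extension that C23 standardizes; the kernel builds with it enabled):

    #include <stdio.h>

    struct info {
	    unsigned long flags;
	    unsigned long length;
	    unsigned long align_offset;	/* imagine this field was added later */
    };

    int main(void)
    {
	    struct info info = {};	/* every member zeroed, present and future */

	    info.length = 4096;		/* set only what this caller cares about */
	    printf("flags=%lu align_offset=%lu\n", info.flags, info.align_offset);
	    return 0;			/* prints: flags=0 align_offset=0 */
    }
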
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 1d5abfdf0f22..64776891120c 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -769,7 +769,7 @@ static int ioctl_getfsuuid(struct file *file, void __user *argp)
struct fsuuid2 u = { .len = sb->s_uuid_len, };
if (!sb->s_uuid_len)
- return -ENOIOCTLCMD;
+ return -ENOTTY;
memcpy(&u.uuid[0], &sb->s_uuid, sb->s_uuid_len);
@@ -781,7 +781,7 @@ static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp)
struct super_block *sb = file_inode(file)->i_sb;
if (!strlen(sb->s_sysfs_name))
- return -ENOIOCTLCMD;
+ return -ENOTTY;
struct fs_sysfs_path u = {};
@@ -796,6 +796,9 @@ static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp)
*
* When you add any new common ioctls to the switches above and below,
* please ensure they have compatible arguments in compat mode.
+ *
+ * The LSM mailing list should also be notified of any command additions or
+ * changes, as specific LSMs may be affected.
*/
static int do_vfs_ioctl(struct file *filp, unsigned int fd,
unsigned int cmd, unsigned long arg)
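
With this change a caller probing for the UUID sees a plain ENOTTY when the filesystem has none - the conventional "ioctl not supported" errno - rather than the kernel-internal ENOIOCTLCMD, which these helpers should not return. A minimal userspace sketch (assumes your <linux/fs.h> already carries FS_IOC_GETFSUUID and struct fsuuid2):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>

    int main(int argc, char **argv)
    {
	    struct fsuuid2 u = { 0 };
	    int fd;

	    if (argc < 2)
		    return 1;
	    fd = open(argv[1], O_RDONLY);
	    if (fd < 0)
		    return 1;
	    if (ioctl(fd, FS_IOC_GETFSUUID, &u) < 0) {
		    if (errno == ENOTTY)	/* filesystem exposes no UUID */
			    fprintf(stderr, "no fs UUID\n");
		    close(fd);
		    return 1;
	    }
	    printf("uuid is %u bytes\n", u.len);
	    close(fd);
	    return 0;
    }
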
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index fc070184b7fa..381d76c5c232 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -4,7 +4,7 @@
# All Rights Reserved.
#
-ccflags-y += -I $(srctree)/$(src) # needed for trace events
+ccflags-y += -I $(src) # needed for trace events
obj-$(CONFIG_FS_IOMAP) += iomap.o
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 4e8e41c8b3c0..41c8f0c68ef5 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -824,12 +824,11 @@ static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
out_unlock:
__iomap_put_folio(iter, pos, 0, folio);
- iomap_write_failed(iter->inode, pos, len);
return status;
}
-static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
+static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
flush_dcache_folio(folio);
@@ -846,14 +845,14 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
* redo the whole thing.
*/
if (unlikely(copied < len && !folio_test_uptodate(folio)))
- return 0;
+ return false;
iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
filemap_dirty_folio(inode->i_mapping, folio);
- return copied;
+ return true;
}
-static size_t iomap_write_end_inline(const struct iomap_iter *iter,
+static void iomap_write_end_inline(const struct iomap_iter *iter,
struct folio *folio, loff_t pos, size_t copied)
{
const struct iomap *iomap = &iter->iomap;
@@ -868,42 +867,32 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
kunmap_local(addr);
mark_inode_dirty(iter->inode);
- return copied;
}
-/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
-static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
+/*
+ * Returns true if all copied bytes have been written to the pagecache,
+ * and false otherwise.
+ */
+static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t copied, struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- loff_t old_size = iter->inode->i_size;
- size_t ret;
if (srcmap->type == IOMAP_INLINE) {
- ret = iomap_write_end_inline(iter, folio, pos, copied);
- } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
- ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
- copied, &folio->page, NULL);
- } else {
- ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
+ iomap_write_end_inline(iter, folio, pos, copied);
+ return true;
}
- /*
- * Update the in-memory inode size after copying the data into the page
- * cache. It's up to the file system to write the updated size to disk,
- * preferably after I/O completion so that no stale data is exposed.
- */
- if (pos + ret > old_size) {
- i_size_write(iter->inode, pos + ret);
- iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
+ if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+ size_t bh_written;
+
+ bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
+ len, copied, &folio->page, NULL);
+ WARN_ON_ONCE(bh_written != copied && bh_written != 0);
+ return bh_written == copied;
}
- __iomap_put_folio(iter, pos, ret, folio);
- if (old_size < pos)
- pagecache_isize_extended(iter->inode, old_size, pos);
- if (ret < len)
- iomap_write_failed(iter->inode, pos + ret, len - ret);
- return ret;
+ return __iomap_write_end(iter->inode, pos, len, copied, folio);
}
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
@@ -911,16 +900,18 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
loff_t length = iomap_length(iter);
size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
loff_t pos = iter->pos;
- ssize_t written = 0;
+ ssize_t total_written = 0;
long status = 0;
struct address_space *mapping = iter->inode->i_mapping;
unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
do {
struct folio *folio;
+ loff_t old_size;
size_t offset; /* Offset into folio */
size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
+ size_t written; /* Bytes have been written */
bytes = iov_iter_count(i);
retry:
@@ -950,8 +941,10 @@ retry:
}
status = iomap_write_begin(iter, pos, bytes, &folio);
- if (unlikely(status))
+ if (unlikely(status)) {
+ iomap_write_failed(iter->inode, pos, bytes);
break;
+ }
if (iter->iomap.flags & IOMAP_F_STALE)
break;
@@ -963,19 +956,37 @@ retry:
flush_dcache_folio(folio);
copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
- status = iomap_write_end(iter, pos, bytes, copied, folio);
+ written = iomap_write_end(iter, pos, bytes, copied, folio) ?
+ copied : 0;
+
+ /*
+ * Update the in-memory inode size after copying the data into
+ * the page cache. It's up to the file system to write the
+ * updated size to disk, preferably after I/O completion so that
+ * no stale data is exposed. Only once that's done can we
+ * unlock and release the folio.
+ */
+ old_size = iter->inode->i_size;
+ if (pos + written > old_size) {
+ i_size_write(iter->inode, pos + written);
+ iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
+ }
+ __iomap_put_folio(iter, pos, written, folio);
- if (unlikely(copied != status))
- iov_iter_revert(i, copied - status);
+ if (old_size < pos)
+ pagecache_isize_extended(iter->inode, old_size, pos);
cond_resched();
- if (unlikely(status == 0)) {
+ if (unlikely(written == 0)) {
/*
* A short copy made iomap_write_end() reject the
* thing entirely. Might be memory poisoning
* halfway through, might be a race with munmap,
* might be severe memory pressure.
*/
+ iomap_write_failed(iter->inode, pos, bytes);
+ iov_iter_revert(i, copied);
+
if (chunk > PAGE_SIZE)
chunk /= 2;
if (copied) {
@@ -983,17 +994,17 @@ retry:
goto retry;
}
} else {
- pos += status;
- written += status;
- length -= status;
+ pos += written;
+ total_written += written;
+ length -= written;
}
} while (iov_iter_count(i) && length);
if (status == -EAGAIN) {
- iov_iter_revert(i, written);
+ iov_iter_revert(i, total_written);
return -EAGAIN;
}
- return written ? written : status;
+ return total_written ? total_written : status;
}
ssize_t
@@ -1322,6 +1333,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
int status;
size_t offset;
size_t bytes = min_t(u64, SIZE_MAX, length);
+ bool ret;
status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
@@ -1333,8 +1345,9 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
if (bytes > folio_size(folio) - offset)
bytes = folio_size(folio) - offset;
- bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
- if (WARN_ON_ONCE(bytes == 0))
+ ret = iomap_write_end(iter, pos, bytes, bytes, folio);
+ __iomap_put_folio(iter, pos, bytes, folio);
+ if (WARN_ON_ONCE(!ret))
return -EIO;
cond_resched();
@@ -1383,6 +1396,7 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
int status;
size_t offset;
size_t bytes = min_t(u64, SIZE_MAX, length);
+ bool ret;
status = iomap_write_begin(iter, pos, bytes, &folio);
if (status)
@@ -1397,8 +1411,9 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
folio_zero_range(folio, offset, bytes);
folio_mark_accessed(folio);
- bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
- if (WARN_ON_ONCE(bytes == 0))
+ ret = iomap_write_end(iter, pos, bytes, bytes, folio);
+ __iomap_put_folio(iter, pos, bytes, folio);
+ if (WARN_ON_ONCE(!ret))
return -EIO;
pos += bytes;
@@ -1958,18 +1973,13 @@ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
return error;
}
-static int iomap_do_writepage(struct folio *folio,
- struct writeback_control *wbc, void *data)
-{
- return iomap_writepage_map(data, wbc, folio);
-}
-
int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
struct iomap_writepage_ctx *wpc,
const struct iomap_writeback_ops *ops)
{
- int ret;
+ struct folio *folio = NULL;
+ int error;
/*
* Writeback from reclaim context should never happen except in the case
@@ -1980,8 +1990,9 @@ iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
return -EIO;
wpc->ops = ops;
- ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
- return iomap_submit_ioend(wpc, ret);
+ while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+ error = iomap_writepage_map(wpc, wbc, folio);
+ return iomap_submit_ioend(wpc, error);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
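
The hunk above is an instance of the general writeback_iter() conversion: the callback-based write_cache_pages() loop becomes an explicit iterator loop that hands back one locked folio at a time and stores the terminal status through the error pointer. A minimal sketch for some other filesystem (example_writepage_map is a hypothetical per-folio writer standing in for iomap_writepage_map, and like it is responsible for unlocking the folio):

    static int example_writepages(struct address_space *mapping,
				  struct writeback_control *wbc)
    {
	    struct folio *folio = NULL;
	    int error = 0;

	    /* writeback_iter() picks the next dirty folio in range and,
	     * when the walk ends, reports the final status via &error. */
	    while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		    error = example_writepage_map(mapping, wbc, folio);
	    return error;
    }
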
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 1c97e64c4784..951f78634adf 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -337,8 +337,6 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
/* Checkpoint list management */
-enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP};
-
/*
* journal_shrink_one_cp_list
*
@@ -350,7 +348,7 @@ enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP};
* Called with j_list_lock held.
*/
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
- enum shrink_type type,
+ enum jbd2_shrink_type type,
bool *released)
{
struct journal_head *last_jh;
@@ -367,12 +365,12 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
jh = next_jh;
next_jh = jh->b_cpnext;
- if (type == SHRINK_DESTROY) {
+ if (type == JBD2_SHRINK_DESTROY) {
ret = __jbd2_journal_remove_checkpoint(jh);
} else {
ret = jbd2_journal_try_remove_checkpoint(jh);
if (ret < 0) {
- if (type == SHRINK_BUSY_SKIP)
+ if (type == JBD2_SHRINK_BUSY_SKIP)
continue;
break;
}
@@ -439,7 +437,7 @@ again:
tid = transaction->t_tid;
freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
- SHRINK_BUSY_SKIP, &released);
+ JBD2_SHRINK_BUSY_SKIP, &released);
nr_freed += freed;
(*nr_to_scan) -= min(*nr_to_scan, freed);
if (*nr_to_scan == 0)
@@ -472,21 +470,25 @@ out:
* journal_clean_checkpoint_list
*
* Find all the written-back checkpoint buffers in the journal and release them.
- * If 'destroy' is set, release all buffers unconditionally.
+ * If 'type' is JBD2_SHRINK_DESTROY, release all buffers unconditionally. If
+ * 'type' is JBD2_SHRINK_BUSY_STOP, stop releasing buffers as soon as a busy
+ * buffer is encountered. To avoid wasting CPU cycles scanning the buffer
+ * list in some cases, don't pass JBD2_SHRINK_BUSY_SKIP as 'type' to this
+ * function.
*
* Called with j_list_lock held.
*/
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal,
+ enum jbd2_shrink_type type)
{
transaction_t *transaction, *last_transaction, *next_transaction;
- enum shrink_type type;
bool released;
+ WARN_ON_ONCE(type == JBD2_SHRINK_BUSY_SKIP);
+
transaction = journal->j_checkpoint_transactions;
if (!transaction)
return;
- type = destroy ? SHRINK_DESTROY : SHRINK_BUSY_STOP;
last_transaction = transaction->t_cpprev;
next_transaction = transaction;
do {
@@ -527,7 +529,7 @@ void jbd2_journal_destroy_checkpoint(journal_t *journal)
spin_unlock(&journal->j_list_lock);
break;
}
- __jbd2_journal_clean_checkpoint_list(journal, true);
+ __jbd2_journal_clean_checkpoint_list(journal, JBD2_SHRINK_DESTROY);
spin_unlock(&journal->j_list_lock);
cond_resched();
}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 5e122586e06e..75ea4e9a5cab 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -501,7 +501,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* frees some memory
*/
spin_lock(&journal->j_list_lock);
- __jbd2_journal_clean_checkpoint_list(journal, false);
+ __jbd2_journal_clean_checkpoint_list(journal, JBD2_SHRINK_BUSY_STOP);
spin_unlock(&journal->j_list_lock);
jbd2_debug(3, "JBD2: commit phase 1\n");
@@ -571,7 +571,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
J_ASSERT(commit_transaction->t_nr_buffers <=
atomic_read(&commit_transaction->t_outstanding_credits));
- err = 0;
bufs = 0;
descriptor = NULL;
while (commit_transaction->t_buffers) {
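
These hunks rename the checkpoint shrink policies from the file-local shrink_type to a shared jbd2_shrink_type so callers outside checkpoint.c can name the policy. Judging by the old definition removed above, the shared version (presumably now in include/linux/jbd2.h) looks like this sketch:

    enum jbd2_shrink_type {
	    JBD2_SHRINK_DESTROY,	/* release checkpoint buffers unconditionally */
	    JBD2_SHRINK_BUSY_STOP,	/* stop at the first busy buffer */
	    JBD2_SHRINK_BUSY_SKIP,	/* skip busy buffers and keep scanning */
    };
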
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 00224f3a8d6e..defb4162c3d5 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -1110,6 +1110,9 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
return rc;
request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
+ if (request > c->sector_size - c->cleanmarker_size)
+ return -ERANGE;
+
rc = jffs2_reserve_space(c, request, &length,
ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
if (rc) {
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 73389c68e251..9609349e92e5 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1141,7 +1141,7 @@ journal_found:
lbmLogShutdown(log);
close: /* close external log device */
- fput(bdev_file);
+ bdev_fput(bdev_file);
free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);
@@ -1485,7 +1485,7 @@ int lmLogClose(struct super_block *sb)
bdev_file = log->bdev_file;
rc = lmLogShutdown(log);
- fput(bdev_file);
+ bdev_fput(bdev_file);
kfree(log);
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e9df2f87072c..8502ef68459b 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -636,11 +636,18 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
* each file a separate locking class. Let's differentiate on
* whether the file has mmap or not for now.
*
- * Both paths of the branch look the same. They're supposed to
+ * For similar reasons, writable and readonly files are given different
+ * lockdep keys, because the writable file /sys/power/resume may call vfs
+ * lookup helpers for arbitrary paths and readonly files can be read by
+ * overlayfs from vfs helpers when sysfs is a lower layer of overlayfs.
+ *
+ * All three cases look the same. They're supposed to
* look that way and give @of->mutex different static lockdep keys.
*/
if (has_mmap)
mutex_init(&of->mutex);
+ else if (file->f_mode & FMODE_WRITE)
+ mutex_init(&of->mutex);
else
mutex_init(&of->mutex);
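
The branches can be textually identical because mutex_init() is a macro that declares a distinct static lock_class_key at every expansion site, so each arm hands lockdep a different class for @of->mutex. Roughly, the relevant pattern (a sketch of include/linux/mutex.h on lockdep-enabled builds):

    #define mutex_init(mutex)					\
    do {							\
	    static struct lock_class_key __key;			\
								\
	    __mutex_init((mutex), #mutex, &__key);		\
    } while (0)
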
diff --git a/fs/libfs.c b/fs/libfs.c
index 3a6f2cb364f8..b635ee5adbcc 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -295,6 +295,18 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
return 0;
}
+static int simple_offset_replace(struct offset_ctx *octx, struct dentry *dentry,
+ long offset)
+{
+ int ret;
+
+ ret = mtree_store(&octx->mt, offset, dentry, GFP_KERNEL);
+ if (ret)
+ return ret;
+ offset_set(dentry, offset);
+ return 0;
+}
+
/**
 * simple_offset_remove - Remove an entry from a directory's offset map
* @octx: directory offset ctx to be updated
@@ -346,12 +358,45 @@ int simple_offset_empty(struct dentry *dentry)
}
/**
+ * simple_offset_rename - handle directory offsets for rename
+ * @old_dir: parent directory of source entry
+ * @old_dentry: dentry of source entry
+ * @new_dir: parent_directory of destination entry
+ * @new_dentry: dentry of destination
+ *
+ * Caller provides appropriate serialization.
+ *
+ * User space expects the directory offset value of the replaced
+ * (new) directory entry to be unchanged after a rename.
+ *
+ * Returns zero on success, a negative errno value on failure.
+ */
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
+ struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
+ long new_offset = dentry2offset(new_dentry);
+
+ simple_offset_remove(old_ctx, old_dentry);
+
+ if (new_offset) {
+ offset_set(new_dentry, 0);
+ return simple_offset_replace(new_ctx, old_dentry, new_offset);
+ }
+ return simple_offset_add(new_ctx, old_dentry);
+}
+
+/**
* simple_offset_rename_exchange - exchange rename with directory offsets
* @old_dir: parent of dentry being moved
* @old_dentry: dentry being moved
* @new_dir: destination parent
* @new_dentry: destination dentry
*
+ * This API preserves the directory offset values. Caller provides
+ * appropriate serialization.
+ *
* Returns zero on success. Otherwise a negative errno is returned and the
* rename is rolled back.
*/
@@ -369,11 +414,11 @@ int simple_offset_rename_exchange(struct inode *old_dir,
simple_offset_remove(old_ctx, old_dentry);
simple_offset_remove(new_ctx, new_dentry);
- ret = simple_offset_add(new_ctx, old_dentry);
+ ret = simple_offset_replace(new_ctx, old_dentry, new_index);
if (ret)
goto out_restore;
- ret = simple_offset_add(old_ctx, new_dentry);
+ ret = simple_offset_replace(old_ctx, new_dentry, old_index);
if (ret) {
simple_offset_remove(new_ctx, old_dentry);
goto out_restore;
@@ -388,10 +433,8 @@ int simple_offset_rename_exchange(struct inode *old_dir,
return 0;
out_restore:
- offset_set(old_dentry, old_index);
- mtree_store(&old_ctx->mt, old_index, old_dentry, GFP_KERNEL);
- offset_set(new_dentry, new_index);
- mtree_store(&new_ctx->mt, new_index, new_dentry, GFP_KERNEL);
+ (void)simple_offset_replace(old_ctx, old_dentry, old_index);
+ (void)simple_offset_replace(new_ctx, new_dentry, new_index);
return ret;
}
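
A sketch of how a filesystem built on the offset map would wire the two helpers into its rename operation (example_rename is hypothetical; tmpfs is the in-tree user of these APIs):

    static int example_rename(struct mnt_idmap *idmap,
			      struct inode *old_dir, struct dentry *old_dentry,
			      struct inode *new_dir, struct dentry *new_dentry,
			      unsigned int flags)
    {
	    if (flags & RENAME_EXCHANGE)
		    return simple_offset_rename_exchange(old_dir, old_dentry,
							 new_dir, new_dentry);

	    /* The replaced entry's offset, if any, is inherited by old_dentry. */
	    return simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
    }
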
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 127a728fcbc8..c11516801784 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -117,7 +117,6 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
if (nsm != NULL)
refcount_inc(&nsm->sm_count);
else {
- host = NULL;
nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
ni->hostname, ni->hostname_len);
if (unlikely(nsm == NULL)) {
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index 7cbd2b9f4d11..7f9a2d8aa420 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -20,11 +20,11 @@
#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
+#include <linux/fs_context.h>
static int minix_write_inode(struct inode *inode,
struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
-static int minix_remount (struct super_block * sb, int * flags, char * data);
static void minix_evict_inode(struct inode *inode)
{
@@ -111,19 +111,19 @@ static const struct super_operations minix_sops = {
.evict_inode = minix_evict_inode,
.put_super = minix_put_super,
.statfs = minix_statfs,
- .remount_fs = minix_remount,
};
-static int minix_remount (struct super_block * sb, int * flags, char * data)
+static int minix_reconfigure(struct fs_context *fc)
{
- struct minix_sb_info * sbi = minix_sb(sb);
struct minix_super_block * ms;
+ struct super_block *sb = fc->root->d_sb;
+ struct minix_sb_info * sbi = sb->s_fs_info;
sync_filesystem(sb);
ms = sbi->s_ms;
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (*flags & SB_RDONLY) {
+ if (fc->sb_flags & SB_RDONLY) {
if (ms->s_state & MINIX_VALID_FS ||
!(sbi->s_mount_state & MINIX_VALID_FS))
return 0;
@@ -170,7 +170,7 @@ static bool minix_check_superblock(struct super_block *sb)
return true;
}
-static int minix_fill_super(struct super_block *s, void *data, int silent)
+static int minix_fill_super(struct super_block *s, struct fs_context *fc)
{
struct buffer_head *bh;
struct buffer_head **map;
@@ -180,6 +180,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
struct inode *root_inode;
struct minix_sb_info *sbi;
int ret = -EINVAL;
+ int silent = fc->sb_flags & SB_SILENT;
sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
if (!sbi)
@@ -371,6 +372,23 @@ out:
return ret;
}
+static int minix_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, minix_fill_super);
+}
+
+static const struct fs_context_operations minix_context_ops = {
+ .get_tree = minix_get_tree,
+ .reconfigure = minix_reconfigure,
+};
+
+static int minix_init_fs_context(struct fs_context *fc)
+{
+ fc->ops = &minix_context_ops;
+
+ return 0;
+}
+
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
@@ -680,18 +698,12 @@ void minix_truncate(struct inode * inode)
V2_minix_truncate(inode);
}
-static struct dentry *minix_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_bdev(fs_type, flags, dev_name, data, minix_fill_super);
-}
-
static struct file_system_type minix_fs_type = {
- .owner = THIS_MODULE,
- .name = "minix",
- .mount = minix_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .owner = THIS_MODULE,
+ .name = "minix",
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = minix_init_fs_context,
};
MODULE_ALIAS_FS("minix");
diff --git a/fs/namei.c b/fs/namei.c
index ceb9ddf8dfdd..cb5dde0e309f 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2422,6 +2422,14 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
if (!f.file)
return ERR_PTR(-EBADF);
+ if (flags & LOOKUP_LINKAT_EMPTY) {
+ if (f.file->f_cred != current_cred() &&
+ !ns_capable(f.file->f_cred->user_ns, CAP_DAC_READ_SEARCH)) {
+ fdput(f);
+ return ERR_PTR(-ENOENT);
+ }
+ }
+
dentry = f.file->f_path.dentry;
if (*s && unlikely(!d_can_lookup(dentry))) {
@@ -4050,6 +4058,8 @@ retry:
case 0: case S_IFREG:
error = vfs_create(idmap, path.dentry->d_inode,
dentry, mode, true);
+ if (!error)
+ security_path_post_mknod(idmap, dentry);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(idmap, path.dentry->d_inode,
@@ -4060,11 +4070,6 @@ retry:
dentry, mode, 0);
break;
}
-
- if (error)
- goto out2;
-
- security_path_post_mknod(idmap, dentry);
out2:
done_path_create(&path, dentry);
if (retry_estale(error, lookup_flags)) {
@@ -4644,14 +4649,13 @@ int do_linkat(int olddfd, struct filename *old, int newdfd,
goto out_putnames;
}
/*
- * To use null names we require CAP_DAC_READ_SEARCH
+ * To use null names we require CAP_DAC_READ_SEARCH or
+ * that the open-time creds of the dfd match current.
* This ensures that not everyone will be able to create
- * handlink using the passed filedescriptor.
+ * a hardlink using the passed file descriptor.
*/
- if (flags & AT_EMPTY_PATH && !capable(CAP_DAC_READ_SEARCH)) {
- error = -ENOENT;
- goto out_putnames;
- }
+ if (flags & AT_EMPTY_PATH)
+ how |= LOOKUP_LINKAT_EMPTY;
if (flags & AT_SYMLINK_FOLLOW)
how |= LOOKUP_FOLLOW;
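
From user space the visible effect is that the common O_TMPFILE pattern below no longer requires CAP_DAC_READ_SEARCH, provided the fd being linked was opened with the caller's own credentials (a sketch with error handling trimmed; success still depends on the filesystem supporting O_TMPFILE):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
	    int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);

	    if (fd < 0)
		    return 1;
	    if (write(fd, "x", 1) != 1) {
		    close(fd);
		    return 1;
	    }
	    /* Give the unnamed file a name; previously this needed
	     * CAP_DAC_READ_SEARCH even for a self-opened fd. */
	    if (linkat(fd, "", AT_FDCWD, "/tmp/linked", AT_EMPTY_PATH) < 0)
		    perror("linkat");
	    close(fd);
	    return 0;
    }
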
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
index d4d1d799819e..8e6781e0b10b 100644
--- a/fs/netfs/Makefile
+++ b/fs/netfs/Makefile
@@ -11,7 +11,8 @@ netfs-y := \
main.o \
misc.o \
objects.o \
- output.o
+ write_collect.o \
+ write_issue.o
netfs-$(CONFIG_NETFS_STATS) += stats.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 3298c29b5548..a6bb03bea920 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -10,8 +10,11 @@
#include "internal.h"
/*
- * Unlock the folios in a read operation. We need to set PG_fscache on any
+ * Unlock the folios in a read operation. We need to set PG_writeback on any
* folios we're going to write back before we unlock them.
+ *
+ * Note that if the deprecated NETFS_RREQ_USE_PGPRIV2 is set then we use
+ * PG_private_2 and do a direct write to the cache from here instead.
*/
void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
{
@@ -48,14 +51,14 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
xas_for_each(&xas, folio, last_page) {
loff_t pg_end;
bool pg_failed = false;
- bool folio_started;
+ bool wback_to_cache = false;
+ bool folio_started = false;
if (xas_retry(&xas, folio))
continue;
pg_end = folio_pos(folio) + folio_size(folio) - 1;
- folio_started = false;
for (;;) {
loff_t sreq_end;
@@ -63,10 +66,16 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
pg_failed = true;
break;
}
- if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
- trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
- folio_start_fscache(folio);
- folio_started = true;
+ if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
+ if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE,
+ &subreq->flags)) {
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_start_private_2(folio);
+ folio_started = true;
+ }
+ } else {
+ wback_to_cache |=
+ test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
}
pg_failed |= subreq_failed;
sreq_end = subreq->start + subreq->len - 1;
@@ -98,6 +107,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
kfree(finfo);
}
folio_mark_uptodate(folio);
+ if (wback_to_cache && !WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
+ filemap_dirty_folio(folio->mapping, folio);
+ }
}
if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
@@ -116,7 +130,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
}
static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
- loff_t *_start, size_t *_len, loff_t i_size)
+ unsigned long long *_start,
+ unsigned long long *_len,
+ unsigned long long i_size)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
@@ -266,7 +282,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
goto discard;
- netfs_stat(&netfs_n_rh_readpage);
+ netfs_stat(&netfs_n_rh_read_folio);
trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
/* Set up the output buffer */
@@ -450,7 +466,7 @@ retry:
if (!netfs_is_cache_enabled(ctx) &&
netfs_skip_folio_read(folio, pos, len, false)) {
netfs_stat(&netfs_n_rh_write_zskip);
- goto have_folio_no_wait;
+ goto have_folio;
}
rreq = netfs_alloc_request(mapping, file,
@@ -491,10 +507,6 @@ retry:
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
have_folio:
- ret = folio_wait_fscache_killable(folio);
- if (ret < 0)
- goto error;
-have_folio_no_wait:
*_folio = folio;
_leave(" = 0");
return 0;
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 9a0d32e4b422..1121601536d1 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Network filesystem high-level write support.
+/* Network filesystem high-level buffered write support.
*
* Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -26,25 +26,15 @@ enum netfs_how_to_modify {
NETFS_FLUSH_CONTENT, /* Flush incompatible content. */
};
-static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
-
static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
- if (netfs_group && !folio_get_private(folio))
- folio_attach_private(folio, netfs_get_group(netfs_group));
-}
+ void *priv = folio_get_private(folio);
-#if IS_ENABLED(CONFIG_FSCACHE)
-static void netfs_folio_start_fscache(bool caching, struct folio *folio)
-{
- if (caching)
- folio_start_fscache(folio);
-}
-#else
-static void netfs_folio_start_fscache(bool caching, struct folio *folio)
-{
+ if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
+ folio_attach_private(folio, netfs_get_group(netfs_group));
+ else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
+ folio_detach_private(folio);
}
-#endif
/*
* Decide how we should modify a folio. We might be attempting to do
@@ -63,11 +53,12 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
bool maybe_trouble)
{
struct netfs_folio *finfo = netfs_folio_info(folio);
+ struct netfs_group *group = netfs_folio_group(folio);
loff_t pos = folio_file_pos(folio);
_enter("");
- if (netfs_folio_group(folio) != netfs_group)
+ if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
return NETFS_FLUSH_CONTENT;
if (folio_test_uptodate(folio))
@@ -81,16 +72,12 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
if (file->f_mode & FMODE_READ)
goto no_write_streaming;
- if (test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
- goto no_write_streaming;
if (netfs_is_cache_enabled(ctx)) {
/* We don't want to get a streaming write on a file that loses
* caching service temporarily because the backing store got
* culled.
*/
- if (!test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags))
- set_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags);
goto no_write_streaming;
}
@@ -130,6 +117,37 @@ static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
+/*
+ * Update i_size and estimate the update to i_blocks to reflect the additional
+ * data written into the pagecache until we can find out from the server what
+ * the values actually are.
+ */
+static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
+ loff_t i_size, loff_t pos, size_t copied)
+{
+ blkcnt_t add;
+ size_t gap;
+
+ if (ctx->ops->update_i_size) {
+ ctx->ops->update_i_size(inode, pos);
+ return;
+ }
+
+ i_size_write(inode, pos);
+#if IS_ENABLED(CONFIG_FSCACHE)
+ fscache_update_cookie(ctx->cache, NULL, &pos);
+#endif
+
+ gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
+ if (copied > gap) {
+ add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);
+
+ inode->i_blocks = min_t(blkcnt_t,
+ DIV_ROUND_UP(pos, SECTOR_SIZE),
+ inode->i_blocks + add);
+ }
+}
+
/**
* netfs_perform_write - Copy data into the pagecache.
* @iocb: The operation parameters
@@ -160,11 +178,11 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
};
struct netfs_io_request *wreq = NULL;
struct netfs_folio *finfo;
- struct folio *folio;
+ struct folio *folio, *writethrough = NULL;
enum netfs_how_to_modify howto;
enum netfs_folio_trace trace;
unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
- ssize_t written = 0, ret;
+ ssize_t written = 0, ret, ret2;
loff_t i_size, pos = iocb->ki_pos, from, to;
size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
bool maybe_trouble = false;
@@ -172,15 +190,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
) {
- if (pos < i_size_read(inode)) {
- ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
- if (ret < 0) {
- goto out;
- }
- }
-
wbc_attach_fdatawrite_inode(&wbc, mapping->host);
+ ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
+ if (ret < 0) {
+ wbc_detach_inode(&wbc);
+ goto out;
+ }
+
wreq = netfs_begin_writethrough(iocb, iter->count);
if (IS_ERR(wreq)) {
wbc_detach_inode(&wbc);
@@ -190,7 +207,9 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
}
if (!is_sync_kiocb(iocb))
wreq->iocb = iocb;
- wreq->cleanup = netfs_cleanup_buffered_write;
+ netfs_stat(&netfs_n_wh_writethrough);
+ } else {
+ netfs_stat(&netfs_n_wh_buffered_write);
}
do {
@@ -231,6 +250,16 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
offset = pos & (flen - 1);
part = min_t(size_t, flen - offset, part);
+ /* Wait for writeback to complete. The writeback engine owns
+ * the info in folio->private and may change it until it
+ * removes the WB mark.
+ */
+ if (folio_get_private(folio) &&
+ folio_wait_writeback_killable(folio)) {
+ ret = written ? -EINTR : -ERESTARTSYS;
+ goto error_folio_unlock;
+ }
+
if (signal_pending(current)) {
ret = written ? -EINTR : -ERESTARTSYS;
goto error_folio_unlock;
@@ -305,6 +334,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
maybe_trouble = true;
iov_iter_revert(iter, copied);
copied = 0;
+ folio_unlock(folio);
goto retry;
}
netfs_set_group(folio, netfs_group);
@@ -352,41 +382,22 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
trace_netfs_folio(folio, trace);
/* Update the inode size if we moved the EOF marker */
- i_size = i_size_read(inode);
pos += copied;
- if (pos > i_size) {
- if (ctx->ops->update_i_size) {
- ctx->ops->update_i_size(inode, pos);
- } else {
- i_size_write(inode, pos);
-#if IS_ENABLED(CONFIG_FSCACHE)
- fscache_update_cookie(ctx->cache, NULL, &pos);
-#endif
- }
- }
+ i_size = i_size_read(inode);
+ if (pos > i_size)
+ netfs_update_i_size(ctx, inode, i_size, pos, copied);
written += copied;
if (likely(!wreq)) {
folio_mark_dirty(folio);
+ folio_unlock(folio);
} else {
- if (folio_test_dirty(folio))
- /* Sigh. mmap. */
- folio_clear_dirty_for_io(folio);
- /* We make multiple writes to the folio... */
- if (!folio_test_writeback(folio)) {
- folio_wait_fscache(folio);
- folio_start_writeback(folio);
- folio_start_fscache(folio);
- if (wreq->iter.count == 0)
- trace_netfs_folio(folio, netfs_folio_trace_wthru);
- else
- trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
- }
- netfs_advance_writethrough(wreq, copied,
- offset + copied == flen);
+ netfs_advance_writethrough(wreq, &wbc, folio, copied,
+ offset + copied == flen,
+ &writethrough);
+ /* Folio unlocked */
}
retry:
- folio_unlock(folio);
folio_put(folio);
folio = NULL;
@@ -394,11 +405,16 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
} while (iov_iter_count(iter));
out:
+ if (likely(written) && ctx->ops->post_modify)
+ ctx->ops->post_modify(inode);
+
if (unlikely(wreq)) {
- ret = netfs_end_writethrough(wreq, iocb);
+ ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
wbc_detach_inode(&wbc);
- if (ret == -EIOCBQUEUED)
- return ret;
+ if (ret2 == -EIOCBQUEUED)
+ return ret2;
+ if (ret == 0)
+ ret = ret2;
}
iocb->ki_pos += written;
@@ -504,9 +520,11 @@ EXPORT_SYMBOL(netfs_file_write_iter);
*/
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
+ struct netfs_group *group;
struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
+ struct netfs_inode *ictx = netfs_inode(inode);
vm_fault_t ret = VM_FAULT_RETRY;
int err;
@@ -514,11 +532,13 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
sb_start_pagefault(inode->i_sb);
- if (folio_wait_writeback_killable(folio))
+ if (folio_lock_killable(folio) < 0)
goto out;
- if (folio_lock_killable(folio) < 0)
+ if (folio_wait_writeback_killable(folio)) {
+ ret = VM_FAULT_LOCKED;
goto out;
+ }
/* Can we see a streaming write here? */
if (WARN_ON(!folio_test_uptodate(folio))) {
@@ -526,7 +546,8 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
goto out;
}
- if (netfs_folio_group(folio) != netfs_group) {
+ group = netfs_folio_group(folio);
+ if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
folio_unlock(folio);
err = filemap_fdatawait_range(inode->i_mapping,
folio_pos(folio),
@@ -550,708 +571,11 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
netfs_set_group(folio, netfs_group);
file_update_time(file);
+ if (ictx->ops->post_modify)
+ ictx->ops->post_modify(inode);
ret = VM_FAULT_LOCKED;
out:
sb_end_pagefault(inode->i_sb);
return ret;
}
EXPORT_SYMBOL(netfs_page_mkwrite);
-
-/*
- * Kill all the pages in the given range
- */
-static void netfs_kill_pages(struct address_space *mapping,
- loff_t start, loff_t len)
-{
- struct folio *folio;
- pgoff_t index = start / PAGE_SIZE;
- pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
-
- _enter("%llx-%llx", start, start + len - 1);
-
- do {
- _debug("kill %lx (to %lx)", index, last);
-
- folio = filemap_get_folio(mapping, index);
- if (IS_ERR(folio)) {
- next = index + 1;
- continue;
- }
-
- next = folio_next_index(folio);
-
- trace_netfs_folio(folio, netfs_folio_trace_kill);
- folio_clear_uptodate(folio);
- if (folio_test_fscache(folio))
- folio_end_fscache(folio);
- folio_end_writeback(folio);
- folio_lock(folio);
- generic_error_remove_folio(mapping, folio);
- folio_unlock(folio);
- folio_put(folio);
-
- } while (index = next, index <= last);
-
- _leave("");
-}
-
-/*
- * Redirty all the pages in a given range.
- */
-static void netfs_redirty_pages(struct address_space *mapping,
- loff_t start, loff_t len)
-{
- struct folio *folio;
- pgoff_t index = start / PAGE_SIZE;
- pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
-
- _enter("%llx-%llx", start, start + len - 1);
-
- do {
- _debug("redirty %llx @%llx", len, start);
-
- folio = filemap_get_folio(mapping, index);
- if (IS_ERR(folio)) {
- next = index + 1;
- continue;
- }
-
- next = folio_next_index(folio);
- trace_netfs_folio(folio, netfs_folio_trace_redirty);
- filemap_dirty_folio(mapping, folio);
- if (folio_test_fscache(folio))
- folio_end_fscache(folio);
- folio_end_writeback(folio);
- folio_put(folio);
- } while (index = next, index <= last);
-
- balance_dirty_pages_ratelimited(mapping);
-
- _leave("");
-}
-
-/*
- * Completion of write to server
- */
-static void netfs_pages_written_back(struct netfs_io_request *wreq)
-{
- struct address_space *mapping = wreq->mapping;
- struct netfs_folio *finfo;
- struct netfs_group *group = NULL;
- struct folio *folio;
- pgoff_t last;
- int gcount = 0;
-
- XA_STATE(xas, &mapping->i_pages, wreq->start / PAGE_SIZE);
-
- _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
-
- rcu_read_lock();
-
- last = (wreq->start + wreq->len - 1) / PAGE_SIZE;
- xas_for_each(&xas, folio, last) {
- WARN(!folio_test_writeback(folio),
- "bad %zx @%llx page %lx %lx\n",
- wreq->len, wreq->start, folio->index, last);
-
- if ((finfo = netfs_folio_info(folio))) {
- /* Streaming writes cannot be redirtied whilst under
- * writeback, so discard the streaming record.
- */
- folio_detach_private(folio);
- group = finfo->netfs_group;
- gcount++;
- trace_netfs_folio(folio, netfs_folio_trace_clear_s);
- kfree(finfo);
- } else if ((group = netfs_folio_group(folio))) {
- /* Need to detach the group pointer if the page didn't
- * get redirtied. If it has been redirtied, then it
- * must be within the same group.
- */
- if (folio_test_dirty(folio)) {
- trace_netfs_folio(folio, netfs_folio_trace_redirtied);
- goto end_wb;
- }
- if (folio_trylock(folio)) {
- if (!folio_test_dirty(folio)) {
- folio_detach_private(folio);
- gcount++;
- trace_netfs_folio(folio, netfs_folio_trace_clear_g);
- } else {
- trace_netfs_folio(folio, netfs_folio_trace_redirtied);
- }
- folio_unlock(folio);
- goto end_wb;
- }
-
- xas_pause(&xas);
- rcu_read_unlock();
- folio_lock(folio);
- if (!folio_test_dirty(folio)) {
- folio_detach_private(folio);
- gcount++;
- trace_netfs_folio(folio, netfs_folio_trace_clear_g);
- } else {
- trace_netfs_folio(folio, netfs_folio_trace_redirtied);
- }
- folio_unlock(folio);
- rcu_read_lock();
- } else {
- trace_netfs_folio(folio, netfs_folio_trace_clear);
- }
- end_wb:
- if (folio_test_fscache(folio))
- folio_end_fscache(folio);
- xas_advance(&xas, folio_next_index(folio) - 1);
- folio_end_writeback(folio);
- }
-
- rcu_read_unlock();
- netfs_put_group_many(group, gcount);
- _leave("");
-}
-
-/*
- * Deal with the disposition of the folios that are under writeback to close
- * out the operation.
- */
-static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq)
-{
- struct address_space *mapping = wreq->mapping;
-
- _enter("");
-
- switch (wreq->error) {
- case 0:
- netfs_pages_written_back(wreq);
- break;
-
- default:
- pr_notice("R=%08x Unexpected error %d\n", wreq->debug_id, wreq->error);
- fallthrough;
- case -EACCES:
- case -EPERM:
- case -ENOKEY:
- case -EKEYEXPIRED:
- case -EKEYREJECTED:
- case -EKEYREVOKED:
- case -ENETRESET:
- case -EDQUOT:
- case -ENOSPC:
- netfs_redirty_pages(mapping, wreq->start, wreq->len);
- break;
-
- case -EROFS:
- case -EIO:
- case -EREMOTEIO:
- case -EFBIG:
- case -ENOENT:
- case -ENOMEDIUM:
- case -ENXIO:
- netfs_kill_pages(mapping, wreq->start, wreq->len);
- break;
- }
-
- if (wreq->error)
- mapping_set_error(mapping, wreq->error);
- if (wreq->netfs_ops->done)
- wreq->netfs_ops->done(wreq);
-}
-
-/*
- * Extend the region to be written back to include subsequent contiguously
- * dirty pages if possible, but don't sleep while doing so.
- *
- * If this page holds new content, then we can include filler zeros in the
- * writeback.
- */
-static void netfs_extend_writeback(struct address_space *mapping,
- struct netfs_group *group,
- struct xa_state *xas,
- long *_count,
- loff_t start,
- loff_t max_len,
- bool caching,
- size_t *_len,
- size_t *_top)
-{
- struct netfs_folio *finfo;
- struct folio_batch fbatch;
- struct folio *folio;
- unsigned int i;
- pgoff_t index = (start + *_len) / PAGE_SIZE;
- size_t len;
- void *priv;
- bool stop = true;
-
- folio_batch_init(&fbatch);
-
- do {
- /* Firstly, we gather up a batch of contiguous dirty pages
- * under the RCU read lock - but we can't clear the dirty flags
- * there if any of those pages are mapped.
- */
- rcu_read_lock();
-
- xas_for_each(xas, folio, ULONG_MAX) {
- stop = true;
- if (xas_retry(xas, folio))
- continue;
- if (xa_is_value(folio))
- break;
- if (folio->index != index) {
- xas_reset(xas);
- break;
- }
-
- if (!folio_try_get_rcu(folio)) {
- xas_reset(xas);
- continue;
- }
-
- /* Has the folio moved or been split? */
- if (unlikely(folio != xas_reload(xas))) {
- folio_put(folio);
- xas_reset(xas);
- break;
- }
-
- if (!folio_trylock(folio)) {
- folio_put(folio);
- xas_reset(xas);
- break;
- }
- if (!folio_test_dirty(folio) ||
- folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
- folio_unlock(folio);
- folio_put(folio);
- xas_reset(xas);
- break;
- }
-
- stop = false;
- len = folio_size(folio);
- priv = folio_get_private(folio);
- if ((const struct netfs_group *)priv != group) {
- stop = true;
- finfo = netfs_folio_info(folio);
- if (finfo->netfs_group != group ||
- finfo->dirty_offset > 0) {
- folio_unlock(folio);
- folio_put(folio);
- xas_reset(xas);
- break;
- }
- len = finfo->dirty_len;
- }
-
- *_top += folio_size(folio);
- index += folio_nr_pages(folio);
- *_count -= folio_nr_pages(folio);
- *_len += len;
- if (*_len >= max_len || *_count <= 0)
- stop = true;
-
- if (!folio_batch_add(&fbatch, folio))
- break;
- if (stop)
- break;
- }
-
- xas_pause(xas);
- rcu_read_unlock();
-
- /* Now, if we obtained any folios, we can shift them to being
- * writable and mark them for caching.
- */
- if (!folio_batch_count(&fbatch))
- break;
-
- for (i = 0; i < folio_batch_count(&fbatch); i++) {
- folio = fbatch.folios[i];
- trace_netfs_folio(folio, netfs_folio_trace_store_plus);
-
- if (!folio_clear_dirty_for_io(folio))
- BUG();
- folio_start_writeback(folio);
- netfs_folio_start_fscache(caching, folio);
- folio_unlock(folio);
- }
-
- folio_batch_release(&fbatch);
- cond_resched();
- } while (!stop);
-}
-
-/*
- * Synchronously write back the locked page and any subsequent non-locked dirty
- * pages.
- */
-static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
- struct writeback_control *wbc,
- struct netfs_group *group,
- struct xa_state *xas,
- struct folio *folio,
- unsigned long long start,
- unsigned long long end)
-{
- struct netfs_io_request *wreq;
- struct netfs_folio *finfo;
- struct netfs_inode *ctx = netfs_inode(mapping->host);
- unsigned long long i_size = i_size_read(&ctx->inode);
- size_t len, max_len;
- bool caching = netfs_is_cache_enabled(ctx);
- long count = wbc->nr_to_write;
- int ret;
-
- _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
-
- wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
- NETFS_WRITEBACK);
- if (IS_ERR(wreq)) {
- folio_unlock(folio);
- return PTR_ERR(wreq);
- }
-
- if (!folio_clear_dirty_for_io(folio))
- BUG();
- folio_start_writeback(folio);
- netfs_folio_start_fscache(caching, folio);
-
- count -= folio_nr_pages(folio);
-
- /* Find all consecutive lockable dirty pages that have contiguous
- * written regions, stopping when we find a page that is not
- * immediately lockable, is not dirty or is missing, or we reach the
- * end of the range.
- */
- trace_netfs_folio(folio, netfs_folio_trace_store);
-
- len = wreq->len;
- finfo = netfs_folio_info(folio);
- if (finfo) {
- start += finfo->dirty_offset;
- if (finfo->dirty_offset + finfo->dirty_len != len) {
- len = finfo->dirty_len;
- goto cant_expand;
- }
- len = finfo->dirty_len;
- }
-
- if (start < i_size) {
- /* Trim the write to the EOF; the extra data is ignored. Also
- * put an upper limit on the size of a single storedata op.
- */
- max_len = 65536 * 4096;
- max_len = min_t(unsigned long long, max_len, end - start + 1);
- max_len = min_t(unsigned long long, max_len, i_size - start);
-
- if (len < max_len)
- netfs_extend_writeback(mapping, group, xas, &count, start,
- max_len, caching, &len, &wreq->upper_len);
- }
-
-cant_expand:
- len = min_t(unsigned long long, len, i_size - start);
-
- /* We now have a contiguous set of dirty pages, each with writeback
- * set; the first page is still locked at this point, but all the rest
- * have been unlocked.
- */
- folio_unlock(folio);
- wreq->start = start;
- wreq->len = len;
-
- if (start < i_size) {
- _debug("write back %zx @%llx [%llx]", len, start, i_size);
-
- /* Speculatively write to the cache. We have to fix this up
- * later if the store fails.
- */
- wreq->cleanup = netfs_cleanup_buffered_write;
-
- iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start,
- wreq->upper_len);
- __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
- ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback);
- if (ret == 0 || ret == -EIOCBQUEUED)
- wbc->nr_to_write -= len / PAGE_SIZE;
- } else {
- _debug("write discard %zx @%llx [%llx]", len, start, i_size);
-
- /* The dirty region was entirely beyond the EOF. */
- fscache_clear_page_bits(mapping, start, len, caching);
- netfs_pages_written_back(wreq);
- ret = 0;
- }
-
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
- _leave(" = 1");
- return 1;
-}
-
-/*
- * Write a region of pages back to the server
- */
-static ssize_t netfs_writepages_begin(struct address_space *mapping,
- struct writeback_control *wbc,
- struct netfs_group *group,
- struct xa_state *xas,
- unsigned long long *_start,
- unsigned long long end)
-{
- const struct netfs_folio *finfo;
- struct folio *folio;
- unsigned long long start = *_start;
- ssize_t ret;
- void *priv;
- int skips = 0;
-
- _enter("%llx,%llx,", start, end);
-
-search_again:
- /* Find the first dirty page in the group. */
- rcu_read_lock();
-
- for (;;) {
- folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
- if (xas_retry(xas, folio) || xa_is_value(folio))
- continue;
- if (!folio)
- break;
-
- if (!folio_try_get_rcu(folio)) {
- xas_reset(xas);
- continue;
- }
-
- if (unlikely(folio != xas_reload(xas))) {
- folio_put(folio);
- xas_reset(xas);
- continue;
- }
-
- /* Skip any dirty folio that's not in the group of interest. */
- priv = folio_get_private(folio);
- if ((const struct netfs_group *)priv != group) {
- finfo = netfs_folio_info(folio);
- if (finfo->netfs_group != group) {
- folio_put(folio);
- continue;
- }
- }
-
- xas_pause(xas);
- break;
- }
- rcu_read_unlock();
- if (!folio)
- return 0;
-
- start = folio_pos(folio); /* May regress with THPs */
-
- _debug("wback %lx", folio->index);
-
- /* At this point we hold neither the i_pages lock nor the page lock:
- * the page may be truncated or invalidated (changing page->mapping to
- * NULL), or even swizzled back from swapper_space to tmpfs file
- * mapping
- */
-lock_again:
- if (wbc->sync_mode != WB_SYNC_NONE) {
- ret = folio_lock_killable(folio);
- if (ret < 0)
- return ret;
- } else {
- if (!folio_trylock(folio))
- goto search_again;
- }
-
- if (folio->mapping != mapping ||
- !folio_test_dirty(folio)) {
- start += folio_size(folio);
- folio_unlock(folio);
- goto search_again;
- }
-
- if (folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
- folio_unlock(folio);
- if (wbc->sync_mode != WB_SYNC_NONE) {
- folio_wait_writeback(folio);
-#ifdef CONFIG_FSCACHE
- folio_wait_fscache(folio);
-#endif
- goto lock_again;
- }
-
- start += folio_size(folio);
- if (wbc->sync_mode == WB_SYNC_NONE) {
- if (skips >= 5 || need_resched()) {
- ret = 0;
- goto out;
- }
- skips++;
- }
- goto search_again;
- }
-
- ret = netfs_write_back_from_locked_folio(mapping, wbc, group, xas,
- folio, start, end);
-out:
- if (ret > 0)
- *_start = start + ret;
- _leave(" = %zd [%llx]", ret, *_start);
- return ret;
-}
-
-/*
- * Write a region of pages back to the server
- */
-static int netfs_writepages_region(struct address_space *mapping,
- struct writeback_control *wbc,
- struct netfs_group *group,
- unsigned long long *_start,
- unsigned long long end)
-{
- ssize_t ret;
-
- XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
-
- do {
- ret = netfs_writepages_begin(mapping, wbc, group, &xas,
- _start, end);
- if (ret > 0 && wbc->nr_to_write > 0)
- cond_resched();
- } while (ret > 0 && wbc->nr_to_write > 0);
-
- return ret > 0 ? 0 : ret;
-}
-
-/*
- * write some of the pending data back to the server
- */
-int netfs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- struct netfs_group *group = NULL;
- loff_t start, end;
- int ret;
-
- _enter("");
-
- /* We have to be careful as we can end up racing with setattr()
- * truncating the pagecache since the caller doesn't take a lock here
- * to prevent it.
- */
-
- if (wbc->range_cyclic && mapping->writeback_index) {
- start = mapping->writeback_index * PAGE_SIZE;
- ret = netfs_writepages_region(mapping, wbc, group,
- &start, LLONG_MAX);
- if (ret < 0)
- goto out;
-
- if (wbc->nr_to_write <= 0) {
- mapping->writeback_index = start / PAGE_SIZE;
- goto out;
- }
-
- start = 0;
- end = mapping->writeback_index * PAGE_SIZE;
- mapping->writeback_index = 0;
- ret = netfs_writepages_region(mapping, wbc, group, &start, end);
- if (ret == 0)
- mapping->writeback_index = start / PAGE_SIZE;
- } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
- start = 0;
- ret = netfs_writepages_region(mapping, wbc, group,
- &start, LLONG_MAX);
- if (wbc->nr_to_write > 0 && ret == 0)
- mapping->writeback_index = start / PAGE_SIZE;
- } else {
- start = wbc->range_start;
- ret = netfs_writepages_region(mapping, wbc, group,
- &start, wbc->range_end);
- }
-
-out:
- _leave(" = %d", ret);
- return ret;
-}
-EXPORT_SYMBOL(netfs_writepages);
-
-/*
- * Deal with the disposition of a laundered folio.
- */
-static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
-{
- if (wreq->error) {
- pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
- mapping_set_error(wreq->mapping, wreq->error);
- }
-}
-
-/**
- * netfs_launder_folio - Clean up a dirty folio that's being invalidated
- * @folio: The folio to clean
- *
- * This is called to write back a folio that's being invalidated when an inode
- * is getting torn down. Ideally, writepages would be used instead.
- */
-int netfs_launder_folio(struct folio *folio)
-{
- struct netfs_io_request *wreq;
- struct address_space *mapping = folio->mapping;
- struct netfs_folio *finfo = netfs_folio_info(folio);
- struct netfs_group *group = netfs_folio_group(folio);
- struct bio_vec bvec;
- unsigned long long i_size = i_size_read(mapping->host);
- unsigned long long start = folio_pos(folio);
- size_t offset = 0, len;
- int ret = 0;
-
- if (finfo) {
- offset = finfo->dirty_offset;
- start += offset;
- len = finfo->dirty_len;
- } else {
- len = folio_size(folio);
- }
- len = min_t(unsigned long long, len, i_size - start);
-
- wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
- if (IS_ERR(wreq)) {
- ret = PTR_ERR(wreq);
- goto out;
- }
-
- if (!folio_clear_dirty_for_io(folio))
- goto out_put;
-
- trace_netfs_folio(folio, netfs_folio_trace_launder);
-
- _debug("launder %llx-%llx", start, start + len - 1);
-
- /* Speculatively write to the cache. We have to fix this up later if
- * the store fails.
- */
- wreq->cleanup = netfs_cleanup_launder_folio;
-
- bvec_set_folio(&bvec, folio, len, offset);
- iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
- __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
- ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);
-
-out_put:
- folio_detach_private(folio);
- netfs_put_group(group);
- kfree(finfo);
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
-out:
- folio_wait_fscache(folio);
- _leave(" = %d", ret);
- return ret;
-}
-EXPORT_SYMBOL(netfs_launder_folio);
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index ad4370b3935d..10a1e4da6bda 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -26,7 +26,7 @@
*
* The caller must hold any appropriate locks.
*/
-static ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
+ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *iter)
{
struct netfs_io_request *rreq;
ssize_t ret;
@@ -98,6 +98,7 @@ out:
iov_iter_revert(iter, orig_count - iov_iter_count(iter));
return ret;
}
+EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);
/**
* netfs_unbuffered_read_iter - Perform an unbuffered or direct I/O read
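[Editor's note] Exporting netfs_unbuffered_read_iter_locked() lets a filesystem that must hold its own locks across a direct read drive the DIO path itself rather than going through netfs_unbuffered_read_iter(). A minimal sketch of such a caller (the filesystem name is hypothetical, and the shared inode lock shown is only an assumption about what the caller wants to serialise):

	/* Sketch only: a hypothetical ->read_iter that takes the
	 * filesystem's own lock before calling into the netfs DIO path.
	 */
	static ssize_t myfs_dio_read_iter(struct kiocb *iocb, struct iov_iter *iter)
	{
		struct inode *inode = file_inode(iocb->ki_filp);
		ssize_t ret;

		inode_lock_shared(inode);
		ret = netfs_unbuffered_read_iter_locked(iocb, iter);
		inode_unlock_shared(inode);
		return ret;
	}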
diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
index bee047e20f5d..608ba6416919 100644
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -34,6 +34,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
unsigned long long start = iocb->ki_pos;
unsigned long long end = start + iov_iter_count(iter);
ssize_t ret, n;
+ size_t len = iov_iter_count(iter);
bool async = !is_sync_kiocb(iocb);
_enter("");
@@ -46,13 +47,17 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
_debug("uw %llx-%llx", start, end);
- wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
- start, end - start,
- iocb->ki_flags & IOCB_DIRECT ?
- NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
+ wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
+ iocb->ki_flags & IOCB_DIRECT ?
+ NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
if (IS_ERR(wreq))
return PTR_ERR(wreq);
+ wreq->io_streams[0].avail = true;
+ trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
+ netfs_write_trace_dio_write :
+ netfs_write_trace_unbuffered_write));
+
{
/* If this is an async op and we're not using a bounce buffer,
* we have to save the source buffer as the iterator is only
@@ -63,7 +68,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
* request.
*/
if (async || user_backed_iter(iter)) {
- n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0);
+ n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
if (n < 0) {
ret = n;
goto out;
@@ -71,7 +76,6 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
wreq->direct_bv_count = n;
wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
- wreq->len = iov_iter_count(&wreq->iter);
} else {
wreq->iter = *iter;
}
@@ -79,6 +83,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
wreq->io_iter = wreq->iter;
}
+ __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+
/* Copy the data into the bounce buffer and encrypt it. */
// TODO
@@ -87,10 +93,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
if (async)
wreq->iocb = iocb;
wreq->cleanup = netfs_cleanup_dio_write;
- ret = netfs_begin_write(wreq, is_sync_kiocb(iocb),
- iocb->ki_flags & IOCB_DIRECT ?
- netfs_write_trace_dio_write :
- netfs_write_trace_unbuffered_write);
+ ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
if (ret < 0) {
_debug("begin = %zd", ret);
goto out;
@@ -100,9 +103,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
TASK_UNINTERRUPTIBLE);
-
+ smp_rmb(); /* Read error/transferred after RIP flag */
ret = wreq->error;
- _debug("waited = %zd", ret);
if (ret == 0) {
ret = wreq->transferred;
iocb->ki_pos += ret;
@@ -132,18 +134,20 @@ out:
ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
struct netfs_inode *ictx = netfs_inode(inode);
- unsigned long long end;
ssize_t ret;
+ loff_t pos = iocb->ki_pos;
+ unsigned long long end = pos + iov_iter_count(from) - 1;
- _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+ _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
if (!iov_iter_count(from))
return 0;
trace_netfs_write_iter(iocb, from);
- netfs_stat(&netfs_n_rh_dio_write);
+ netfs_stat(&netfs_n_wh_dio_write);
ret = netfs_start_io_direct(inode);
if (ret < 0)
@@ -157,7 +161,25 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = file_update_time(file);
if (ret < 0)
goto out;
- ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ /* We could block if there are any pages in the range. */
+ ret = -EAGAIN;
+ if (filemap_range_has_page(mapping, pos, end))
+ if (filemap_invalidate_inode(inode, true, pos, end))
+ goto out;
+ } else {
+ ret = filemap_write_and_wait_range(mapping, pos, end);
+ if (ret < 0)
+ goto out;
+ }
+
+ /*
+ * After a write we want buffered reads to be sure to go to disk to get
+ * the new data. We invalidate clean cached pages from the region we're
+ * about to write. We do this *before* the write so that we can return
+ * without clobbering -EIOCBQUEUED from ->direct_IO().
+ */
+ ret = filemap_invalidate_inode(inode, true, pos, end);
if (ret < 0)
goto out;
end = iocb->ki_pos + iov_iter_count(from);
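[Editor's note] The rewritten preamble above encodes an ordering the DIO write path depends on: with IOCB_NOWAIT it returns -EAGAIN rather than blocking if the range is populated; otherwise dirty pagecache is flushed first and clean pages are dropped second, both before the write is issued, so later buffered reads refetch the new data and -EIOCBQUEUED from the async path is never clobbered. The ordering distilled into one hedged helper (hypothetical name):

	/* Sketch: flush dirty pages, then invalidate clean ones, both
	 * before issuing the DIO write itself.
	 */
	static int myfs_prep_dio_write(struct address_space *mapping,
				       struct inode *inode, loff_t pos, loff_t end)
	{
		int ret = filemap_write_and_wait_range(mapping, pos, end);

		if (ret < 0)
			return ret;
		return filemap_invalidate_inode(inode, true, pos, end);
	}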
diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c
index 43a651ed8264..38637e5c9b57 100644
--- a/fs/netfs/fscache_io.c
+++ b/fs/netfs/fscache_io.c
@@ -166,6 +166,7 @@ struct fscache_write_request {
loff_t start;
size_t len;
bool set_bits;
+ bool using_pgpriv2;
netfs_io_terminated_t term_func;
void *term_func_priv;
};
@@ -182,7 +183,7 @@ void __fscache_clear_page_bits(struct address_space *mapping,
rcu_read_lock();
xas_for_each(&xas, page, last) {
- end_page_fscache(page);
+ folio_end_private_2(page_folio(page));
}
rcu_read_unlock();
}
@@ -197,8 +198,9 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
{
struct fscache_write_request *wreq = priv;
- fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
- wreq->set_bits);
+ if (wreq->using_pgpriv2)
+ fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
+ wreq->set_bits);
if (wreq->term_func)
wreq->term_func(wreq->term_func_priv, transferred_or_error,
@@ -212,7 +214,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
loff_t start, size_t len, loff_t i_size,
netfs_io_terminated_t term_func,
void *term_func_priv,
- bool cond)
+ bool using_pgpriv2, bool cond)
{
struct fscache_write_request *wreq;
struct netfs_cache_resources *cres;
@@ -230,6 +232,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
wreq->mapping = mapping;
wreq->start = start;
wreq->len = len;
+ wreq->using_pgpriv2 = using_pgpriv2;
wreq->set_bits = cond;
wreq->term_func = term_func;
wreq->term_func_priv = term_func_priv;
@@ -257,7 +260,8 @@ abandon_end:
abandon_free:
kfree(wreq);
abandon:
- fscache_clear_page_bits(mapping, start, len, cond);
+ if (using_pgpriv2)
+ fscache_clear_page_bits(mapping, start, len, cond);
if (term_func)
term_func(term_func_priv, ret, false);
}
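[Editor's note] With using_pgpriv2 false, __fscache_write_to_cache() no longer touches PG_private_2 at all; any cleanup is the caller's business and happens in the termination handler. A hedged sketch of a handler matching the netfs_io_terminated_t signature (all names hypothetical):

	struct myfs_write_ctx {
		struct completion done;		/* hypothetical caller context */
	};

	/* transferred_or_error is a byte count on success, -ve errno on failure. */
	static void myfs_cache_write_done(void *priv, ssize_t transferred_or_error,
					  bool was_async)
	{
		struct myfs_write_ctx *ctx = priv;

		if (IS_ERR_VALUE(transferred_or_error))
			pr_warn("cache write failed: %zd\n", transferred_or_error);
		complete(&ctx->done);
	}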
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index ec7045d24400..95e281a8af78 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -37,6 +37,8 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
extern unsigned int netfs_debug;
extern struct list_head netfs_io_requests;
extern spinlock_t netfs_proc_lock;
+extern mempool_t netfs_request_pool;
+extern mempool_t netfs_subrequest_pool;
#ifdef CONFIG_PROC_FS
static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
@@ -91,22 +93,12 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
}
/*
- * output.c
- */
-int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
- enum netfs_write_trace what);
-struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
-int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end);
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb);
-
-/*
* stats.c
*/
#ifdef CONFIG_NETFS_STATS
extern atomic_t netfs_n_rh_dio_read;
-extern atomic_t netfs_n_rh_dio_write;
extern atomic_t netfs_n_rh_readahead;
-extern atomic_t netfs_n_rh_readpage;
+extern atomic_t netfs_n_rh_read_folio;
extern atomic_t netfs_n_rh_rreq;
extern atomic_t netfs_n_rh_sreq;
extern atomic_t netfs_n_rh_download;
@@ -123,6 +115,10 @@ extern atomic_t netfs_n_rh_write_begin;
extern atomic_t netfs_n_rh_write_done;
extern atomic_t netfs_n_rh_write_failed;
extern atomic_t netfs_n_rh_write_zskip;
+extern atomic_t netfs_n_wh_buffered_write;
+extern atomic_t netfs_n_wh_writethrough;
+extern atomic_t netfs_n_wh_dio_write;
+extern atomic_t netfs_n_wh_writepages;
extern atomic_t netfs_n_wh_wstream_conflict;
extern atomic_t netfs_n_wh_upload;
extern atomic_t netfs_n_wh_upload_done;
@@ -149,6 +145,33 @@ static inline void netfs_stat_d(atomic_t *stat)
#endif
/*
+ * write_collect.c
+ */
+int netfs_folio_written_back(struct folio *folio);
+void netfs_write_collection_worker(struct work_struct *work);
+void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);
+
+/*
+ * write_issue.c
+ */
+struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+ struct file *file,
+ loff_t start,
+ enum netfs_io_origin origin);
+void netfs_reissue_write(struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq);
+int netfs_advance_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start, size_t len, bool to_eof);
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *folio, size_t copied, bool to_page_end,
+ struct folio **writethrough_cache);
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *writethrough_cache);
+int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
+
+/*
* Miscellaneous functions.
*/
static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
@@ -168,7 +191,7 @@ static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
*/
static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
{
- if (netfs_group)
+ if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
refcount_inc(&netfs_group->ref);
return netfs_group;
}
@@ -178,7 +201,9 @@ static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_grou
*/
static inline void netfs_put_group(struct netfs_group *netfs_group)
{
- if (netfs_group && refcount_dec_and_test(&netfs_group->ref))
+ if (netfs_group &&
+ netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+ refcount_dec_and_test(&netfs_group->ref))
netfs_group->free(netfs_group);
}
@@ -187,7 +212,9 @@ static inline void netfs_put_group(struct netfs_group *netfs_group)
*/
static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
{
- if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref))
+ if (netfs_group &&
+ netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+ refcount_sub_and_test(nr, &netfs_group->ref))
netfs_group->free(netfs_group);
}
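[Editor's note] NETFS_FOLIO_COPY_TO_CACHE is a sentinel pointer value rather than a real struct netfs_group allocation, which is why all three refcounting helpers must now filter it out before dereferencing. The pattern in miniature (demo names; the sentinel constant here is illustrative, the real one is defined in include/linux/netfs.h):

	struct demo_group {
		refcount_t ref;
	};

	#define DEMO_COPY_TO_CACHE ((struct demo_group *)0x1UL)	/* never dereferenced */

	static struct demo_group *demo_get_group(struct demo_group *g)
	{
		if (g && g != DEMO_COPY_TO_CACHE)
			refcount_inc(&g->ref);
		return g;
	}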
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 4261ad6c55b6..c93851b98368 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -99,145 +99,6 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
}
/*
- * Deal with the completion of writing the data to the cache. We have to clear
- * the PG_fscache bits on the folios involved and release the caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
- bool was_async)
-{
- struct netfs_io_subrequest *subreq;
- struct folio *folio;
- pgoff_t unlocked = 0;
- bool have_unlocked = false;
-
- rcu_read_lock();
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
- xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
- if (xas_retry(&xas, folio))
- continue;
-
- /* We might have multiple writes from the same huge
- * folio, but we mustn't unlock a folio more than once.
- */
- if (have_unlocked && folio->index <= unlocked)
- continue;
- unlocked = folio_next_index(folio) - 1;
- trace_netfs_folio(folio, netfs_folio_trace_end_copy);
- folio_end_fscache(folio);
- have_unlocked = true;
- }
- }
-
- rcu_read_unlock();
- netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_io_subrequest *subreq = priv;
- struct netfs_io_request *rreq = subreq->rreq;
-
- if (IS_ERR_VALUE(transferred_or_error)) {
- netfs_stat(&netfs_n_rh_write_failed);
- trace_netfs_failure(rreq, subreq, transferred_or_error,
- netfs_fail_copy_to_cache);
- } else {
- netfs_stat(&netfs_n_rh_write_done);
- }
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
- /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_copy_ops))
- netfs_rreq_unmark_after_write(rreq, was_async);
-
- netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
-}
-
-/*
- * Perform any outstanding writes to the cache. We inherit a ref from the
- * caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
-{
- struct netfs_cache_resources *cres = &rreq->cache_resources;
- struct netfs_io_subrequest *subreq, *next, *p;
- struct iov_iter iter;
- int ret;
-
- trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
-
- /* We don't want terminating writes trying to wake us up whilst we're
- * still going through the list.
- */
- atomic_inc(&rreq->nr_copy_ops);
-
- list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
- if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
- list_del_init(&subreq->rreq_link);
- netfs_put_subrequest(subreq, false,
- netfs_sreq_trace_put_no_copy);
- }
- }
-
- list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
- /* Amalgamate adjacent writes */
- while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
- next = list_next_entry(subreq, rreq_link);
- if (next->start != subreq->start + subreq->len)
- break;
- subreq->len += next->len;
- list_del_init(&next->rreq_link);
- netfs_put_subrequest(next, false,
- netfs_sreq_trace_put_merged);
- }
-
- ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
- subreq->len, rreq->i_size, true);
- if (ret < 0) {
- trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
- trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
- continue;
- }
-
- iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
- subreq->start, subreq->len);
-
- atomic_inc(&rreq->nr_copy_ops);
- netfs_stat(&netfs_n_rh_write);
- netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
- trace_netfs_sreq(subreq, netfs_sreq_trace_write);
- cres->ops->write(cres, subreq->start, &iter,
- netfs_rreq_copy_terminated, subreq);
- }
-
- /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
- if (atomic_dec_and_test(&rreq->nr_copy_ops))
- netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work)
-{
- struct netfs_io_request *rreq =
- container_of(work, struct netfs_io_request, work);
-
- netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
-{
- rreq->work.func = netfs_rreq_write_to_cache_work;
- if (!queue_work(system_unbound_wq, &rreq->work))
- BUG();
-}
-
-/*
* Handle a short read.
*/
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
@@ -352,8 +213,13 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
unsigned int i;
size_t transferred = 0;
- for (i = 0; i < rreq->direct_bv_count; i++)
+ for (i = 0; i < rreq->direct_bv_count; i++) {
flush_dcache_page(rreq->direct_bv[i].bv_page);
+ // TODO: cifs marks pages in the destination buffer
+ // dirty under some circumstances after a read. Do we
+ // need to do that too?
+ set_page_dirty(rreq->direct_bv[i].bv_page);
+ }
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
if (subreq->error || subreq->transferred == 0)
@@ -409,9 +275,6 @@ again:
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
- if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
- return netfs_rreq_write_to_cache(rreq);
-
netfs_rreq_completed(rreq, was_async);
}
@@ -618,7 +481,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
set:
if (subreq->len > rreq->len)
- pr_warn("R=%08x[%u] SREQ>RREQ %zx > %zx\n",
+ pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n",
rreq->debug_id, subreq->debug_index,
subreq->len, rreq->len);
@@ -643,8 +506,7 @@ out:
* Slice off a piece of a read request and submit an I/O request for it.
*/
static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
- struct iov_iter *io_iter,
- unsigned int *_debug_index)
+ struct iov_iter *io_iter)
{
struct netfs_io_subrequest *subreq;
enum netfs_io_source source;
@@ -653,11 +515,10 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
if (!subreq)
return false;
- subreq->debug_index = (*_debug_index)++;
subreq->start = rreq->start + rreq->submitted;
subreq->len = io_iter->count;
- _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
+ _debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
list_add_tail(&subreq->rreq_link, &rreq->subrequests);
/* Call out to the cache to find out what it can do with the remaining
@@ -707,7 +568,6 @@ subreq_failed:
int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
{
struct iov_iter io_iter;
- unsigned int debug_index = 0;
int ret;
_enter("R=%x %llx-%llx",
@@ -733,12 +593,12 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
atomic_set(&rreq->nr_outstanding, 1);
io_iter = rreq->io_iter;
do {
- _debug("submit %llx + %zx >= %llx",
+ _debug("submit %llx + %llx >= %llx",
rreq->start, rreq->submitted, rreq->i_size);
if (rreq->origin == NETFS_DIO_READ &&
rreq->start + rreq->submitted >= rreq->i_size)
break;
- if (!netfs_rreq_submit_slice(rreq, &io_iter, &debug_index))
+ if (!netfs_rreq_submit_slice(rreq, &io_iter))
break;
if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
index 5e77618a7940..5f0f438e5d21 100644
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/export.h>
+#include <linux/mempool.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
@@ -23,6 +24,11 @@ unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
+static struct kmem_cache *netfs_request_slab;
+static struct kmem_cache *netfs_subrequest_slab;
+mempool_t netfs_request_pool;
+mempool_t netfs_subrequest_pool;
+
#ifdef CONFIG_PROC_FS
LIST_HEAD(netfs_io_requests);
DEFINE_SPINLOCK(netfs_proc_lock);
@@ -31,9 +37,9 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
[NETFS_READAHEAD] = "RA",
[NETFS_READPAGE] = "RP",
[NETFS_READ_FOR_WRITE] = "RW",
+ [NETFS_COPY_TO_CACHE] = "CC",
[NETFS_WRITEBACK] = "WB",
[NETFS_WRITETHROUGH] = "WT",
- [NETFS_LAUNDER_WRITE] = "LW",
[NETFS_UNBUFFERED_WRITE] = "UW",
[NETFS_DIO_READ] = "DR",
[NETFS_DIO_WRITE] = "DW",
@@ -56,7 +62,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v)
rreq = list_entry(v, struct netfs_io_request, proc_link);
seq_printf(m,
- "%08x %s %3d %2lx %4d %3d @%04llx %zx/%zx",
+ "%08x %s %3d %2lx %4d %3d @%04llx %llx/%llx",
rreq->debug_id,
netfs_origins[rreq->origin],
refcount_read(&rreq->ref),
@@ -98,25 +104,54 @@ static int __init netfs_init(void)
{
int ret = -ENOMEM;
+ netfs_request_slab = kmem_cache_create("netfs_request",
+ sizeof(struct netfs_io_request), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
+ NULL);
+ if (!netfs_request_slab)
+ goto error_req;
+
+ if (mempool_init_slab_pool(&netfs_request_pool, 100, netfs_request_slab) < 0)
+ goto error_reqpool;
+
+ netfs_subrequest_slab = kmem_cache_create("netfs_subrequest",
+ sizeof(struct netfs_io_subrequest), 0,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
+ NULL);
+ if (!netfs_subrequest_slab)
+ goto error_subreq;
+
+ if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0)
+ goto error_subreqpool;
+
if (!proc_mkdir("fs/netfs", NULL))
- goto error;
+ goto error_proc;
if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
&netfs_requests_seq_ops))
- goto error_proc;
+ goto error_procfile;
#ifdef CONFIG_FSCACHE_STATS
if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
netfs_stats_show))
- goto error_proc;
+ goto error_procfile;
#endif
ret = fscache_init();
if (ret < 0)
- goto error_proc;
+ goto error_fscache;
return 0;
-error_proc:
+error_fscache:
+error_procfile:
remove_proc_entry("fs/netfs", NULL);
-error:
+error_proc:
+ mempool_exit(&netfs_subrequest_pool);
+error_subreqpool:
+ kmem_cache_destroy(netfs_subrequest_slab);
+error_subreq:
+ mempool_exit(&netfs_request_pool);
+error_reqpool:
+ kmem_cache_destroy(netfs_request_slab);
+error_req:
return ret;
}
fs_initcall(netfs_init);
@@ -125,5 +160,9 @@ static void __exit netfs_exit(void)
{
fscache_exit();
remove_proc_entry("fs/netfs", NULL);
+ mempool_exit(&netfs_subrequest_pool);
+ kmem_cache_destroy(netfs_subrequest_slab);
+ mempool_exit(&netfs_request_pool);
+ kmem_cache_destroy(netfs_request_slab);
}
module_exit(netfs_exit);
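[Editor's note] Request and subrequest allocation is now backed by mempools over dedicated slabs so writeback can always make progress under memory pressure; note that the init error labels unwind in exactly the reverse order of construction and netfs_exit() mirrors them. The same shape in miniature (all names hypothetical):

	struct demo_obj { int x; };	/* stand-in payload */

	static struct kmem_cache *demo_slab;
	static mempool_t demo_pool;

	static int __init demo_init(void)
	{
		demo_slab = kmem_cache_create("demo", sizeof(struct demo_obj), 0,
					      SLAB_HWCACHE_ALIGN, NULL);
		if (!demo_slab)
			return -ENOMEM;
		/* Keep 16 objects in reserve for allocation under pressure. */
		if (mempool_init_slab_pool(&demo_pool, 16, demo_slab) < 0) {
			kmem_cache_destroy(demo_slab);
			return -ENOMEM;
		}
		return 0;
	}

	static void __exit demo_exit(void)
	{
		mempool_exit(&demo_pool);
		kmem_cache_destroy(demo_slab);
	}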
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 90051ced8e2a..bc1fc54fb724 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -177,13 +177,11 @@ EXPORT_SYMBOL(netfs_clear_inode_writeback);
*/
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
- struct netfs_folio *finfo = NULL;
+ struct netfs_folio *finfo;
size_t flen = folio_size(folio);
_enter("{%lx},%zx,%zx", folio->index, offset, length);
- folio_wait_fscache(folio);
-
if (!folio_test_private(folio))
return;
@@ -248,12 +246,6 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
if (folio_test_private(folio))
return false;
- if (folio_test_fscache(folio)) {
- if (current_is_kswapd() || !(gfp & __GFP_FS))
- return false;
- folio_wait_fscache(folio);
- }
-
fscache_note_page_release(netfs_i_cookie(ctx));
return true;
}
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 610ceb5bd86c..c90d482b1650 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -6,6 +6,8 @@
*/
#include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/delay.h>
#include "internal.h"
/*
@@ -20,17 +22,22 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct inode *inode = file ? file_inode(file) : mapping->host;
struct netfs_inode *ctx = netfs_inode(inode);
struct netfs_io_request *rreq;
+ mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
+ struct kmem_cache *cache = mempool->pool_data;
bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
origin == NETFS_DIO_READ ||
origin == NETFS_DIO_WRITE);
bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
int ret;
- rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request),
- GFP_KERNEL);
- if (!rreq)
- return ERR_PTR(-ENOMEM);
+ for (;;) {
+ rreq = mempool_alloc(mempool, GFP_KERNEL);
+ if (rreq)
+ break;
+ msleep(10);
+ }
+ memset(rreq, 0, kmem_cache_size(cache));
rreq->start = start;
rreq->len = len;
rreq->upper_len = len;
@@ -40,19 +47,27 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
rreq->inode = inode;
rreq->i_size = i_size_read(inode);
rreq->debug_id = atomic_inc_return(&debug_ids);
+ rreq->wsize = INT_MAX;
+ spin_lock_init(&rreq->lock);
+ INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
+ INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
INIT_LIST_HEAD(&rreq->subrequests);
INIT_WORK(&rreq->work, NULL);
refcount_set(&rreq->ref, 1);
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- if (cached)
+ if (cached) {
__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
+ if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags))
+ /* Filesystem uses deprecated PG_private_2 marking. */
+ __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+ }
if (file && file->f_flags & O_NONBLOCK)
__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
if (rreq->netfs_ops->init_request) {
ret = rreq->netfs_ops->init_request(rreq, file);
if (ret < 0) {
- kfree(rreq);
+ mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
return ERR_PTR(ret);
}
}
@@ -74,6 +89,8 @@ void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
{
struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream;
+ int s;
while (!list_empty(&rreq->subrequests)) {
subreq = list_first_entry(&rreq->subrequests,
@@ -82,6 +99,25 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
netfs_put_subrequest(subreq, was_async,
netfs_sreq_trace_put_clear);
}
+
+ for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
+ stream = &rreq->io_streams[s];
+ while (!list_empty(&stream->subrequests)) {
+ subreq = list_first_entry(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, was_async,
+ netfs_sreq_trace_put_clear);
+ }
+ }
+}
+
+static void netfs_free_request_rcu(struct rcu_head *rcu)
+{
+ struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);
+
+ mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
+ netfs_stat_d(&netfs_n_rh_rreq);
}
static void netfs_free_request(struct work_struct *work)
@@ -106,8 +142,7 @@ static void netfs_free_request(struct work_struct *work)
}
kvfree(rreq->direct_bv);
}
- kfree_rcu(rreq, rcu);
- netfs_stat_d(&netfs_n_rh_rreq);
+ call_rcu(&rreq->rcu, netfs_free_request_rcu);
}
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
@@ -139,19 +174,25 @@ void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
struct netfs_io_subrequest *subreq;
-
- subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?:
- sizeof(struct netfs_io_subrequest),
- GFP_KERNEL);
- if (subreq) {
- INIT_WORK(&subreq->work, NULL);
- INIT_LIST_HEAD(&subreq->rreq_link);
- refcount_set(&subreq->ref, 2);
- subreq->rreq = rreq;
- netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
- netfs_stat(&netfs_n_rh_sreq);
+ mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
+ struct kmem_cache *cache = mempool->pool_data;
+
+ for (;;) {
+ subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool,
+ GFP_KERNEL);
+ if (subreq)
+ break;
+ msleep(10);
}
+ memset(subreq, 0, kmem_cache_size(cache));
+ INIT_WORK(&subreq->work, NULL);
+ INIT_LIST_HEAD(&subreq->rreq_link);
+ refcount_set(&subreq->ref, 2);
+ subreq->rreq = rreq;
+ subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
+ netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
+ netfs_stat(&netfs_n_rh_sreq);
return subreq;
}
@@ -173,7 +214,7 @@ static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
trace_netfs_sreq(subreq, netfs_sreq_trace_free);
if (rreq->netfs_ops->free_subrequest)
rreq->netfs_ops->free_subrequest(subreq);
- kfree(subreq);
+ mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
netfs_stat_d(&netfs_n_rh_sreq);
netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
}
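[Editor's note] Two details above are worth spelling out: mempool_alloc() is simply retried with a short sleep until it succeeds, so these allocators can no longer fail with ENOMEM, and the object is zeroed to the full slab size because a filesystem-supplied pool may embed netfs_io_request in a larger structure. Distilled (hypothetical names):

	struct demo_req { int x; };	/* stand-in for the pooled object */

	/* Sketch: allocate-or-wait from a mempool, then zero the whole
	 * slab object, which may be larger than the base struct.
	 */
	static struct demo_req *demo_alloc_req(mempool_t *pool)
	{
		struct kmem_cache *cache = pool->pool_data;
		struct demo_req *req;

		for (;;) {
			req = mempool_alloc(pool, GFP_KERNEL);
			if (req)
				break;
			msleep(10);	/* brief back-off before retrying */
		}
		memset(req, 0, kmem_cache_size(cache));
		return req;
	}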
diff --git a/fs/netfs/output.c b/fs/netfs/output.c
deleted file mode 100644
index 625eb68f3e5a..000000000000
--- a/fs/netfs/output.c
+++ /dev/null
@@ -1,478 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Network filesystem high-level write support.
- *
- * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/writeback.h>
-#include <linux/pagevec.h>
-#include "internal.h"
-
-/**
- * netfs_create_write_request - Create a write operation.
- * @wreq: The write request this is storing from.
- * @dest: The destination type
- * @start: Start of the region this write will modify
- * @len: Length of the modification
- * @worker: The worker function to handle the write(s)
- *
- * Allocate a write operation, set it up and add it to the list on a write
- * request.
- */
-struct netfs_io_subrequest *netfs_create_write_request(struct netfs_io_request *wreq,
- enum netfs_io_source dest,
- loff_t start, size_t len,
- work_func_t worker)
-{
- struct netfs_io_subrequest *subreq;
-
- subreq = netfs_alloc_subrequest(wreq);
- if (subreq) {
- INIT_WORK(&subreq->work, worker);
- subreq->source = dest;
- subreq->start = start;
- subreq->len = len;
- subreq->debug_index = wreq->subreq_counter++;
-
- switch (subreq->source) {
- case NETFS_UPLOAD_TO_SERVER:
- netfs_stat(&netfs_n_wh_upload);
- break;
- case NETFS_WRITE_TO_CACHE:
- netfs_stat(&netfs_n_wh_write);
- break;
- default:
- BUG();
- }
-
- subreq->io_iter = wreq->io_iter;
- iov_iter_advance(&subreq->io_iter, subreq->start - wreq->start);
- iov_iter_truncate(&subreq->io_iter, subreq->len);
-
- trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
- refcount_read(&subreq->ref),
- netfs_sreq_trace_new);
- atomic_inc(&wreq->nr_outstanding);
- list_add_tail(&subreq->rreq_link, &wreq->subrequests);
- trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
- }
-
- return subreq;
-}
-EXPORT_SYMBOL(netfs_create_write_request);
-
-/*
- * Process a completed write request once all the component operations have
- * been completed.
- */
-static void netfs_write_terminated(struct netfs_io_request *wreq, bool was_async)
-{
- struct netfs_io_subrequest *subreq;
- struct netfs_inode *ctx = netfs_inode(wreq->inode);
- size_t transferred = 0;
-
- _enter("R=%x[]", wreq->debug_id);
-
- trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
-
- list_for_each_entry(subreq, &wreq->subrequests, rreq_link) {
- if (subreq->error || subreq->transferred == 0)
- break;
- transferred += subreq->transferred;
- if (subreq->transferred < subreq->len)
- break;
- }
- wreq->transferred = transferred;
-
- list_for_each_entry(subreq, &wreq->subrequests, rreq_link) {
- if (!subreq->error)
- continue;
- switch (subreq->source) {
- case NETFS_UPLOAD_TO_SERVER:
- /* Depending on the type of failure, this may prevent
- * writeback completion unless we're in disconnected
- * mode.
- */
- if (!wreq->error)
- wreq->error = subreq->error;
- break;
-
- case NETFS_WRITE_TO_CACHE:
- /* Failure doesn't prevent writeback completion unless
- * we're in disconnected mode.
- */
- if (subreq->error != -ENOBUFS)
- ctx->ops->invalidate_cache(wreq);
- break;
-
- default:
- WARN_ON_ONCE(1);
- if (!wreq->error)
- wreq->error = -EIO;
- return;
- }
- }
-
- wreq->cleanup(wreq);
-
- if (wreq->origin == NETFS_DIO_WRITE &&
- wreq->mapping->nrpages) {
- pgoff_t first = wreq->start >> PAGE_SHIFT;
- pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
- invalidate_inode_pages2_range(wreq->mapping, first, last);
- }
-
- if (wreq->origin == NETFS_DIO_WRITE)
- inode_dio_end(wreq->inode);
-
- _debug("finished");
- trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
- clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
- wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
-
- if (wreq->iocb) {
- wreq->iocb->ki_pos += transferred;
- if (wreq->iocb->ki_complete)
- wreq->iocb->ki_complete(
- wreq->iocb, wreq->error ? wreq->error : transferred);
- }
-
- netfs_clear_subrequests(wreq, was_async);
- netfs_put_request(wreq, was_async, netfs_rreq_trace_put_complete);
-}
-
-/*
- * Deal with the completion of writing the data to the cache.
- */
-void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
- bool was_async)
-{
- struct netfs_io_subrequest *subreq = _op;
- struct netfs_io_request *wreq = subreq->rreq;
- unsigned int u;
-
- _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
-
- switch (subreq->source) {
- case NETFS_UPLOAD_TO_SERVER:
- netfs_stat(&netfs_n_wh_upload_done);
- break;
- case NETFS_WRITE_TO_CACHE:
- netfs_stat(&netfs_n_wh_write_done);
- break;
- case NETFS_INVALID_WRITE:
- break;
- default:
- BUG();
- }
-
- if (IS_ERR_VALUE(transferred_or_error)) {
- subreq->error = transferred_or_error;
- trace_netfs_failure(wreq, subreq, transferred_or_error,
- netfs_fail_write);
- goto failed;
- }
-
- if (WARN(transferred_or_error > subreq->len - subreq->transferred,
- "Subreq excess write: R%x[%x] %zd > %zu - %zu",
- wreq->debug_id, subreq->debug_index,
- transferred_or_error, subreq->len, subreq->transferred))
- transferred_or_error = subreq->len - subreq->transferred;
-
- subreq->error = 0;
- subreq->transferred += transferred_or_error;
-
- if (iov_iter_count(&subreq->io_iter) != subreq->len - subreq->transferred)
- pr_warn("R=%08x[%u] ITER POST-MISMATCH %zx != %zx-%zx %x\n",
- wreq->debug_id, subreq->debug_index,
- iov_iter_count(&subreq->io_iter), subreq->len,
- subreq->transferred, subreq->io_iter.iter_type);
-
- if (subreq->transferred < subreq->len)
- goto incomplete;
-
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
-out:
- trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
-
- /* If we decrement nr_outstanding to 0, the ref belongs to us. */
- u = atomic_dec_return(&wreq->nr_outstanding);
- if (u == 0)
- netfs_write_terminated(wreq, was_async);
- else if (u == 1)
- wake_up_var(&wreq->nr_outstanding);
-
- netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
- return;
-
-incomplete:
- if (transferred_or_error == 0) {
- if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
- subreq->error = -ENODATA;
- goto failed;
- }
- } else {
- __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
- }
-
- __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
- set_bit(NETFS_RREQ_INCOMPLETE_IO, &wreq->flags);
- goto out;
-
-failed:
- switch (subreq->source) {
- case NETFS_WRITE_TO_CACHE:
- netfs_stat(&netfs_n_wh_write_failed);
- set_bit(NETFS_RREQ_INCOMPLETE_IO, &wreq->flags);
- break;
- case NETFS_UPLOAD_TO_SERVER:
- netfs_stat(&netfs_n_wh_upload_failed);
- set_bit(NETFS_RREQ_FAILED, &wreq->flags);
- wreq->error = subreq->error;
- break;
- default:
- break;
- }
- goto out;
-}
-EXPORT_SYMBOL(netfs_write_subrequest_terminated);
-
-static void netfs_write_to_cache_op(struct netfs_io_subrequest *subreq)
-{
- struct netfs_io_request *wreq = subreq->rreq;
- struct netfs_cache_resources *cres = &wreq->cache_resources;
-
- trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
-
- cres->ops->write(cres, subreq->start, &subreq->io_iter,
- netfs_write_subrequest_terminated, subreq);
-}
-
-static void netfs_write_to_cache_op_worker(struct work_struct *work)
-{
- struct netfs_io_subrequest *subreq =
- container_of(work, struct netfs_io_subrequest, work);
-
- netfs_write_to_cache_op(subreq);
-}
-
-/**
- * netfs_queue_write_request - Queue a write request for attention
- * @subreq: The write request to be queued
- *
- * Queue the specified write request for processing by a worker thread. We
- * pass the caller's ref on the request to the worker thread.
- */
-void netfs_queue_write_request(struct netfs_io_subrequest *subreq)
-{
- if (!queue_work(system_unbound_wq, &subreq->work))
- netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_wip);
-}
-EXPORT_SYMBOL(netfs_queue_write_request);
-
-/*
- * Set up a op for writing to the cache.
- */
-static void netfs_set_up_write_to_cache(struct netfs_io_request *wreq)
-{
- struct netfs_cache_resources *cres = &wreq->cache_resources;
- struct netfs_io_subrequest *subreq;
- struct netfs_inode *ctx = netfs_inode(wreq->inode);
- struct fscache_cookie *cookie = netfs_i_cookie(ctx);
- loff_t start = wreq->start;
- size_t len = wreq->len;
- int ret;
-
- if (!fscache_cookie_enabled(cookie)) {
- clear_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags);
- return;
- }
-
- _debug("write to cache");
- ret = fscache_begin_write_operation(cres, cookie);
- if (ret < 0)
- return;
-
- ret = cres->ops->prepare_write(cres, &start, &len, wreq->upper_len,
- i_size_read(wreq->inode), true);
- if (ret < 0)
- return;
-
- subreq = netfs_create_write_request(wreq, NETFS_WRITE_TO_CACHE, start, len,
- netfs_write_to_cache_op_worker);
- if (!subreq)
- return;
-
- netfs_write_to_cache_op(subreq);
-}
-
-/*
- * Begin the process of writing out a chunk of data.
- *
- * We are given a write request that holds a series of dirty regions and
- * (partially) covers a sequence of folios, all of which are present. The
- * pages must have been marked as writeback as appropriate.
- *
- * We need to perform the following steps:
- *
- * (1) If encrypting, create an output buffer and encrypt each block of the
- * data into it, otherwise the output buffer will point to the original
- * folios.
- *
- * (2) If the data is to be cached, set up a write op for the entire output
- * buffer to the cache, if the cache wants to accept it.
- *
- * (3) If the data is to be uploaded (ie. not merely cached):
- *
- * (a) If the data is to be compressed, create a compression buffer and
- * compress the data into it.
- *
- * (b) For each destination we want to upload to, set up write ops to write
- * to that destination. We may need multiple writes if the data is not
- * contiguous or the span exceeds wsize for a server.
- */
-int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
- enum netfs_write_trace what)
-{
- struct netfs_inode *ctx = netfs_inode(wreq->inode);
-
- _enter("R=%x %llx-%llx f=%lx",
- wreq->debug_id, wreq->start, wreq->start + wreq->len - 1,
- wreq->flags);
-
- trace_netfs_write(wreq, what);
- if (wreq->len == 0 || wreq->iter.count == 0) {
- pr_err("Zero-sized write [R=%x]\n", wreq->debug_id);
- return -EIO;
- }
-
- if (wreq->origin == NETFS_DIO_WRITE)
- inode_dio_begin(wreq->inode);
-
- wreq->io_iter = wreq->iter;
-
- /* ->outstanding > 0 carries a ref */
- netfs_get_request(wreq, netfs_rreq_trace_get_for_outstanding);
- atomic_set(&wreq->nr_outstanding, 1);
-
- /* Start the encryption/compression going. We can do that in the
- * background whilst we generate a list of write ops that we want to
- * perform.
- */
- // TODO: Encrypt or compress the region as appropriate
-
- /* We need to write all of the region to the cache */
- if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
- netfs_set_up_write_to_cache(wreq);
-
- /* However, we don't necessarily write all of the region to the server.
- * Caching of reads is being managed this way also.
- */
- if (test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
- ctx->ops->create_write_requests(wreq, wreq->start, wreq->len);
-
- if (atomic_dec_and_test(&wreq->nr_outstanding))
- netfs_write_terminated(wreq, false);
-
- if (!may_wait)
- return -EIOCBQUEUED;
-
- wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
- return wreq->error;
-}
-
-/*
- * Begin a write operation for writing through the pagecache.
- */
-struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
-{
- struct netfs_io_request *wreq;
- struct file *file = iocb->ki_filp;
-
- wreq = netfs_alloc_request(file->f_mapping, file, iocb->ki_pos, len,
- NETFS_WRITETHROUGH);
- if (IS_ERR(wreq))
- return wreq;
-
- trace_netfs_write(wreq, netfs_write_trace_writethrough);
-
- __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
- iov_iter_xarray(&wreq->iter, ITER_SOURCE, &wreq->mapping->i_pages, wreq->start, 0);
- wreq->io_iter = wreq->iter;
-
- /* ->outstanding > 0 carries a ref */
- netfs_get_request(wreq, netfs_rreq_trace_get_for_outstanding);
- atomic_set(&wreq->nr_outstanding, 1);
- return wreq;
-}
-
-static void netfs_submit_writethrough(struct netfs_io_request *wreq, bool final)
-{
- struct netfs_inode *ictx = netfs_inode(wreq->inode);
- unsigned long long start;
- size_t len;
-
- if (!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))
- return;
-
- start = wreq->start + wreq->submitted;
- len = wreq->iter.count - wreq->submitted;
- if (!final) {
- len /= wreq->wsize; /* Round to number of maximum packets */
- len *= wreq->wsize;
- }
-
- ictx->ops->create_write_requests(wreq, start, len);
- wreq->submitted += len;
-}
-
-/*
- * Advance the state of the write operation used when writing through the
- * pagecache. Data has been copied into the pagecache that we need to append
- * to the request. If we've added more than wsize then we need to create a new
- * subrequest.
- */
-int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end)
-{
- _enter("ic=%zu sb=%zu ws=%u cp=%zu tp=%u",
- wreq->iter.count, wreq->submitted, wreq->wsize, copied, to_page_end);
-
- wreq->iter.count += copied;
- wreq->io_iter.count += copied;
- if (to_page_end && wreq->io_iter.count - wreq->submitted >= wreq->wsize)
- netfs_submit_writethrough(wreq, false);
-
- return wreq->error;
-}
-
-/*
- * End a write operation used when writing through the pagecache.
- */
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb)
-{
- int ret = -EIOCBQUEUED;
-
- _enter("ic=%zu sb=%zu ws=%u",
- wreq->iter.count, wreq->submitted, wreq->wsize);
-
- if (wreq->submitted < wreq->io_iter.count)
- netfs_submit_writethrough(wreq, true);
-
- if (atomic_dec_and_test(&wreq->nr_outstanding))
- netfs_write_terminated(wreq, false);
-
- if (is_sync_kiocb(iocb)) {
- wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
- TASK_UNINTERRUPTIBLE);
- ret = wreq->error;
- }
-
- netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
- return ret;
-}
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
index deeba9f9dcf5..0892768eea32 100644
--- a/fs/netfs/stats.c
+++ b/fs/netfs/stats.c
@@ -10,9 +10,8 @@
#include "internal.h"
atomic_t netfs_n_rh_dio_read;
-atomic_t netfs_n_rh_dio_write;
atomic_t netfs_n_rh_readahead;
-atomic_t netfs_n_rh_readpage;
+atomic_t netfs_n_rh_read_folio;
atomic_t netfs_n_rh_rreq;
atomic_t netfs_n_rh_sreq;
atomic_t netfs_n_rh_download;
@@ -29,6 +28,10 @@ atomic_t netfs_n_rh_write_begin;
atomic_t netfs_n_rh_write_done;
atomic_t netfs_n_rh_write_failed;
atomic_t netfs_n_rh_write_zskip;
+atomic_t netfs_n_wh_buffered_write;
+atomic_t netfs_n_wh_writethrough;
+atomic_t netfs_n_wh_dio_write;
+atomic_t netfs_n_wh_writepages;
atomic_t netfs_n_wh_wstream_conflict;
atomic_t netfs_n_wh_upload;
atomic_t netfs_n_wh_upload_done;
@@ -39,13 +42,17 @@ atomic_t netfs_n_wh_write_failed;
int netfs_stats_show(struct seq_file *m, void *v)
{
- seq_printf(m, "Netfs : DR=%u DW=%u RA=%u RP=%u WB=%u WBZ=%u\n",
+ seq_printf(m, "Netfs : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
atomic_read(&netfs_n_rh_dio_read),
- atomic_read(&netfs_n_rh_dio_write),
atomic_read(&netfs_n_rh_readahead),
- atomic_read(&netfs_n_rh_readpage),
+ atomic_read(&netfs_n_rh_read_folio),
atomic_read(&netfs_n_rh_write_begin),
atomic_read(&netfs_n_rh_write_zskip));
+ seq_printf(m, "Netfs : BW=%u WT=%u DW=%u WP=%u\n",
+ atomic_read(&netfs_n_wh_buffered_write),
+ atomic_read(&netfs_n_wh_writethrough),
+ atomic_read(&netfs_n_wh_dio_write),
+ atomic_read(&netfs_n_wh_writepages));
seq_printf(m, "Netfs : ZR=%u sh=%u sk=%u\n",
atomic_read(&netfs_n_rh_zero),
atomic_read(&netfs_n_rh_short_read),
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
new file mode 100644
index 000000000000..60112e4b2c5e
--- /dev/null
+++ b/fs/netfs/write_collect.c
@@ -0,0 +1,808 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem write subrequest result collection, assessment
+ * and retrying.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/* Notes made in the collector */
+#define HIT_PENDING 0x01 /* A front op was still pending */
+#define SOME_EMPTY 0x02 /* One or more streams are empty */
+#define ALL_EMPTY 0x04 /* All streams are empty */
+#define MAYBE_DISCONTIG 0x08 /* A front op may be discontiguous (rounded to PAGE_SIZE) */
+#define NEED_REASSESS 0x10 /* Need to loop round and reassess */
+#define REASSESS_DISCONTIG 0x20 /* Reassess discontiguity if contiguity advances */
+#define MADE_PROGRESS 0x40 /* Made progress cleaning up a stream or the folio set */
+#define BUFFERED 0x80 /* The pagecache needs cleaning up */
+#define NEED_RETRY 0x100 /* A front op requests retrying */
+#define SAW_FAILURE 0x200 /* One stream or another hit a permanent failure */
+
+/*
+ * Successful completion of write of a folio to the server and/or cache. Note
+ * that we are not allowed to lock the folio here on pain of deadlocking with
+ * truncate.
+ */
+int netfs_folio_written_back(struct folio *folio)
+{
+ enum netfs_folio_trace why = netfs_folio_trace_clear;
+ struct netfs_folio *finfo;
+ struct netfs_group *group = NULL;
+ int gcount = 0;
+
+ if ((finfo = netfs_folio_info(folio))) {
+ /* Streaming writes cannot be redirtied whilst under writeback,
+ * so discard the streaming record.
+ */
+ folio_detach_private(folio);
+ group = finfo->netfs_group;
+ gcount++;
+ kfree(finfo);
+ why = netfs_folio_trace_clear_s;
+ goto end_wb;
+ }
+
+ if ((group = netfs_folio_group(folio))) {
+ if (group == NETFS_FOLIO_COPY_TO_CACHE) {
+ why = netfs_folio_trace_clear_cc;
+ folio_detach_private(folio);
+ goto end_wb;
+ }
+
+ /* Need to detach the group pointer if the page didn't get
+ * redirtied. If it has been redirtied, then it must be within
+ * the same group.
+ */
+ why = netfs_folio_trace_redirtied;
+ if (!folio_test_dirty(folio)) {
+ folio_detach_private(folio);
+ gcount++;
+ why = netfs_folio_trace_clear_g;
+ }
+ }
+
+end_wb:
+ trace_netfs_folio(folio, why);
+ folio_end_writeback(folio);
+ return gcount;
+}
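[Editor's note] folio->private is multiplexed here: the bottom bit tags a streaming-write netfs_folio record, otherwise the value is a group pointer (possibly the copy-to-cache sentinel). An approximate decoder, following the helper in include/linux/netfs.h:

	/* Approximate version of netfs_folio_info(); the authoritative
	 * helper lives in include/linux/netfs.h.
	 */
	static inline struct netfs_folio *demo_folio_info(struct folio *folio)
	{
		void *priv = folio_get_private(folio);

		if ((unsigned long)priv & NETFS_FOLIO_INFO)	/* low-bit tag */
			return (struct netfs_folio *)((unsigned long)priv &
						      ~NETFS_FOLIO_INFO);
		return NULL;	/* not a streaming-write record */
	}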
+
+/*
+ * Get hold of a folio we have under writeback. We don't want to get the
+ * refcount on it.
+ */
+static struct folio *netfs_writeback_lookup_folio(struct netfs_io_request *wreq, loff_t pos)
+{
+ XA_STATE(xas, &wreq->mapping->i_pages, pos / PAGE_SIZE);
+ struct folio *folio;
+
+ rcu_read_lock();
+
+ for (;;) {
+ xas_reset(&xas);
+ folio = xas_load(&xas);
+ if (xas_retry(&xas, folio))
+ continue;
+
+ if (!folio || xa_is_value(folio))
+ kdebug("R=%08x: folio %lx (%llx) not present",
+ wreq->debug_id, xas.xa_index, pos / PAGE_SIZE);
+ BUG_ON(!folio || xa_is_value(folio));
+
+ if (folio == xas_reload(&xas))
+ break;
+ }
+
+ rcu_read_unlock();
+
+ if (WARN_ONCE(!folio_test_writeback(folio),
+ "R=%08x: folio %lx is not under writeback\n",
+ wreq->debug_id, folio->index)) {
+ trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
+ }
+ return folio;
+}
+
+/*
+ * Unlock any folios we've finished with.
+ */
+static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
+ unsigned long long collected_to,
+ unsigned int *notes)
+{
+ for (;;) {
+ struct folio *folio;
+ struct netfs_folio *finfo;
+ unsigned long long fpos, fend;
+ size_t fsize, flen;
+
+ folio = netfs_writeback_lookup_folio(wreq, wreq->cleaned_to);
+
+ fpos = folio_pos(folio);
+ fsize = folio_size(folio);
+ finfo = netfs_folio_info(folio);
+ flen = finfo ? finfo->dirty_offset + finfo->dirty_len : fsize;
+
+ fend = min_t(unsigned long long, fpos + flen, wreq->i_size);
+
+ trace_netfs_collect_folio(wreq, folio, fend, collected_to);
+
+ if (fpos + fsize > wreq->contiguity) {
+ trace_netfs_collect_contig(wreq, fpos + fsize,
+ netfs_contig_trace_unlock);
+ wreq->contiguity = fpos + fsize;
+ }
+
+ /* Unlock any folio we've transferred all of. */
+ if (collected_to < fend)
+ break;
+
+ wreq->nr_group_rel += netfs_folio_written_back(folio);
+ wreq->cleaned_to = fpos + fsize;
+ *notes |= MADE_PROGRESS;
+
+ if (fpos + fsize >= collected_to)
+ break;
+ }
+}
+
+/*
+ * Perform retries on the streams that need it.
+ */
+static void netfs_retry_write_stream(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream)
+{
+ struct list_head *next;
+
+ _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+
+ if (list_empty(&stream->subrequests))
+ return;
+
+ if (stream->source == NETFS_UPLOAD_TO_SERVER &&
+ wreq->netfs_ops->retry_request)
+ wreq->netfs_ops->retry_request(wreq, stream);
+
+ if (unlikely(stream->failed))
+ return;
+
+ /* If there's no renegotiation to do, just resend each failed subreq. */
+ if (!stream->prepare_write) {
+ struct netfs_io_subrequest *subreq;
+
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ break;
+ if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
+ __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_write(stream, subreq);
+ }
+ }
+ return;
+ }
+
+ next = stream->subrequests.next;
+
+ do {
+ struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
+ unsigned long long start, len;
+ size_t part;
+ bool boundary = false;
+
+ /* Go through the stream and find the next span of contiguous
+ * data that we then rejig (cifs, for example, needs the wsize
+ * renegotiating) and reissue.
+ */
+ from = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ to = from;
+ start = from->start + from->transferred;
+ len = from->len - from->transferred;
+
+ if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
+ return;
+
+ list_for_each_continue(next, &stream->subrequests) {
+ subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
+ if (subreq->start + subreq->transferred != start + len ||
+ test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
+ !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
+ break;
+ to = subreq;
+ len += to->len;
+ }
+
+ /* Work through the sublist. */
+ subreq = from;
+ list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
+ if (!len)
+ break;
+ /* Renegotiate max_len (wsize) */
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+ stream->prepare_write(subreq);
+
+ part = min(len, subreq->max_len);
+ subreq->len = part;
+ subreq->start = start;
+ subreq->transferred = 0;
+ len -= part;
+ start += part;
+ if (len && subreq == to &&
+ __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
+ boundary = true;
+
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ netfs_reissue_write(stream, subreq);
+ if (subreq == to)
+ break;
+ }
+
+ /* If we managed to use fewer subreqs, we can discard the
+ * excess; if we used the same number, then we're done.
+ */
+ if (!len) {
+ if (subreq == to)
+ continue;
+ list_for_each_entry_safe_from(subreq, tmp,
+ &stream->subrequests, rreq_link) {
+ trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
+ if (subreq == to)
+ break;
+ }
+ continue;
+ }
+
+ /* We ran out of subrequests, so we need to allocate some more
+ * and insert them after the ones already in the list.
+ */
+ do {
+ subreq = netfs_alloc_subrequest(wreq);
+ subreq->source = to->source;
+ subreq->start = start;
+ subreq->max_len = len;
+ subreq->max_nr_segs = INT_MAX;
+ subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
+ subreq->stream_nr = to->stream_nr;
+ __set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
+
+ trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+
+ list_add(&subreq->rreq_link, &to->rreq_link);
+ to = list_next_entry(to, rreq_link);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+
+ switch (stream->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload);
+ subreq->max_len = min(len, wreq->wsize);
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ stream->prepare_write(subreq);
+
+ part = min(len, subreq->max_len);
+ subreq->len = subreq->transferred + part;
+ len -= part;
+ start += part;
+ if (!len && boundary) {
+ __set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
+ boundary = false;
+ }
+
+ netfs_reissue_write(stream, subreq);
+ if (!len)
+ break;
+
+ } while (len);
+
+ } while (!list_is_head(next, &stream->subrequests));
+}
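[Editor's note] The reissue loops above re-carve the failed span into pieces no larger than the renegotiated subreq->max_len (itself capped at wsize for uploads). The carving on its own, as a hedged sketch (hypothetical names):

	/* Sketch: split [start, start + len) into max_len-capped parts,
	 * as the retry loop does when reissuing subrequests.
	 */
	static void demo_carve(unsigned long long start, unsigned long long len,
			       size_t max_len)
	{
		while (len) {
			size_t part = min_t(unsigned long long, len, max_len);

			pr_info("reissue %llx+%zx\n", start, part);
			start += part;
			len -= part;
		}
	}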
+
+/*
+ * Perform retries on the streams that need it. If we're doing content
+ * encryption and the server copy changed due to a third-party write, we may
+ * need to do an RMW cycle and also rewrite the data to the cache.
+ */
+static void netfs_retry_writes(struct netfs_io_request *wreq)
+{
+ struct netfs_io_subrequest *subreq;
+ struct netfs_io_stream *stream;
+ int s;
+
+ /* Wait for all outstanding I/O to quiesce before performing retries as
+ * we may need to renegotiate the I/O sizes.
+ */
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (!stream->active)
+ continue;
+
+ list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
+ wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
+ TASK_UNINTERRUPTIBLE);
+ }
+ }
+
+ // TODO: Enc: Fetch changed partial pages
+ // TODO: Enc: Reencrypt content if needed.
+ // TODO: Enc: Wind back transferred point.
+ // TODO: Enc: Mark cache pages for retry.
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->need_retry) {
+ stream->need_retry = false;
+ netfs_retry_write_stream(wreq, stream);
+ }
+ }
+}
+
+/*
+ * Collect and assess the results of various write subrequests. We may need to
+ * retry some of the results - or even do an RMW cycle for content crypto.
+ *
+ * Note that we have a number of parallel, overlapping lists of subrequests,
+ * one to the server and one to the local cache for example, which may not be
+ * the same size or starting position and may not even correspond in boundary
+ * alignment.
+ */
+static void netfs_collect_write_results(struct netfs_io_request *wreq)
+{
+ struct netfs_io_subrequest *front, *remove;
+ struct netfs_io_stream *stream;
+ unsigned long long collected_to;
+ unsigned int notes;
+ int s;
+
+ _enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+ trace_netfs_collect(wreq);
+ trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
+
+reassess_streams:
+ smp_rmb();
+ collected_to = ULLONG_MAX;
+ if (wreq->origin == NETFS_WRITEBACK)
+ notes = ALL_EMPTY | BUFFERED | MAYBE_DISCONTIG;
+ else if (wreq->origin == NETFS_WRITETHROUGH)
+ notes = ALL_EMPTY | BUFFERED;
+ else
+ notes = ALL_EMPTY;
+
+ /* Remove completed subrequests from the front of the streams and
+ * advance the completion point on each stream. We stop when we hit
+ * something that's in progress. The issuer thread may be adding stuff
+ * to the tail whilst we're doing this.
+ *
+ * We must not, however, merge in discontiguities that span whole
+ * folios that aren't under writeback. This is made more complicated
+ * by the folios in the gap being of unpredictable sizes - if they even
+ * exist - but we don't want to look them up.
+ */
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ loff_t rstart, rend;
+
+ stream = &wreq->io_streams[s];
+ /* Read active flag before list pointers */
+ if (!smp_load_acquire(&stream->active))
+ continue;
+
+ front = stream->front;
+ while (front) {
+ trace_netfs_collect_sreq(wreq, front);
+ //_debug("sreq [%x] %llx %zx/%zx",
+ // front->debug_index, front->start, front->transferred, front->len);
+
+ /* Stall if there may be a discontinuity. */
+ rstart = round_down(front->start, PAGE_SIZE);
+ if (rstart > wreq->contiguity) {
+ if (wreq->contiguity > stream->collected_to) {
+ trace_netfs_collect_gap(wreq, stream,
+ wreq->contiguity, 'D');
+ stream->collected_to = wreq->contiguity;
+ }
+ notes |= REASSESS_DISCONTIG;
+ break;
+ }
+ rend = round_up(front->start + front->len, PAGE_SIZE);
+ if (rend > wreq->contiguity) {
+ trace_netfs_collect_contig(wreq, rend,
+ netfs_contig_trace_collect);
+ wreq->contiguity = rend;
+ if (notes & REASSESS_DISCONTIG)
+ notes |= NEED_REASSESS;
+ }
+ notes &= ~MAYBE_DISCONTIG;
+
+ /* Stall if the front is still undergoing I/O. */
+ if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) {
+ notes |= HIT_PENDING;
+ break;
+ }
+ smp_rmb(); /* Read counters after I-P flag. */
+
+ if (stream->failed) {
+ stream->collected_to = front->start + front->len;
+ notes |= MADE_PROGRESS | SAW_FAILURE;
+ goto cancel;
+ }
+ if (front->start + front->transferred > stream->collected_to) {
+ stream->collected_to = front->start + front->transferred;
+ stream->transferred = stream->collected_to - wreq->start;
+ notes |= MADE_PROGRESS;
+ }
+ if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
+ stream->failed = true;
+ stream->error = front->error;
+ if (stream->source == NETFS_UPLOAD_TO_SERVER)
+ mapping_set_error(wreq->mapping, front->error);
+ notes |= NEED_REASSESS | SAW_FAILURE;
+ break;
+ }
+ if (front->transferred < front->len) {
+ stream->need_retry = true;
+ notes |= NEED_RETRY | MADE_PROGRESS;
+ break;
+ }
+
+ cancel:
+ /* Remove if completely consumed. */
+ spin_lock(&wreq->lock);
+
+ remove = front;
+ list_del_init(&front->rreq_link);
+ front = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ stream->front = front;
+ if (!front) {
+ unsigned long long jump_to = atomic64_read(&wreq->issued_to);
+
+ if (stream->collected_to < jump_to) {
+ trace_netfs_collect_gap(wreq, stream, jump_to, 'A');
+ stream->collected_to = jump_to;
+ }
+ }
+
+ spin_unlock(&wreq->lock);
+ netfs_put_subrequest(remove, false,
+ notes & SAW_FAILURE ?
+ netfs_sreq_trace_put_cancel :
+ netfs_sreq_trace_put_done);
+ }
+
+ if (front)
+ notes &= ~ALL_EMPTY;
+ else
+ notes |= SOME_EMPTY;
+
+ if (stream->collected_to < collected_to)
+ collected_to = stream->collected_to;
+ }
+
+ if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
+ wreq->collected_to = collected_to;
+
+ /* If we have an empty stream, we need to jump it forward over any gap,
+ * otherwise the collection point will never advance.
+ *
+ * Note that the issuer always adds to the stream with the lowest
+ * so-far submitted start, so if we see two consecutive subreqs in one
+ * stream with nothing between them in another stream, then the second
+ * stream has a gap that can be jumped.
+ */
+ if (notes & SOME_EMPTY) {
+ unsigned long long jump_to = wreq->start + wreq->len;
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->active &&
+ stream->front &&
+ stream->front->start < jump_to)
+ jump_to = stream->front->start;
+ }
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->active &&
+ !stream->front &&
+ stream->collected_to < jump_to) {
+ trace_netfs_collect_gap(wreq, stream, jump_to, 'B');
+ stream->collected_to = jump_to;
+ }
+ }
+ }
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->active)
+ trace_netfs_collect_stream(wreq, stream);
+ }
+
+ trace_netfs_collect_state(wreq, wreq->collected_to, notes);
+
+ /* Unlock any folios that we have now finished with. */
+ if (notes & BUFFERED) {
+ unsigned long long clean_to = min(wreq->collected_to, wreq->contiguity);
+
+ if (wreq->cleaned_to < clean_to)
+ netfs_writeback_unlock_folios(wreq, clean_to, &notes);
+ } else {
+ wreq->cleaned_to = wreq->collected_to;
+ }
+
+ // TODO: Discard encryption buffers
+
+ /* If all streams are discontiguous with the last folio we cleared, we
+ * may need to skip a set of folios.
+ */
+ if ((notes & (MAYBE_DISCONTIG | ALL_EMPTY)) == MAYBE_DISCONTIG) {
+ unsigned long long jump_to = ULLONG_MAX;
+
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->active && stream->front &&
+ stream->front->start < jump_to)
+ jump_to = stream->front->start;
+ }
+
+ trace_netfs_collect_contig(wreq, jump_to, netfs_contig_trace_jump);
+ wreq->contiguity = jump_to;
+ wreq->cleaned_to = jump_to;
+ wreq->collected_to = jump_to;
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->collected_to < jump_to)
+ stream->collected_to = jump_to;
+ }
+ //cond_resched();
+ notes |= MADE_PROGRESS;
+ goto reassess_streams;
+ }
+
+ if (notes & NEED_RETRY)
+ goto need_retry;
+ if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
+ trace_netfs_rreq(wreq, netfs_rreq_trace_unpause);
+ clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags);
+ wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE);
+ }
+
+ if (notes & NEED_REASSESS) {
+ //cond_resched();
+ goto reassess_streams;
+ }
+ if (notes & MADE_PROGRESS) {
+ //cond_resched();
+ goto reassess_streams;
+ }
+
+out:
+ netfs_put_group_many(wreq->group, wreq->nr_group_rel);
+ wreq->nr_group_rel = 0;
+ _leave(" = %x", notes);
+ return;
+
+need_retry:
+ /* Okay... We're going to have to retry one or both streams. Note
+ * that any partially completed op will have had any wholly transferred
+ * folios removed from it.
+ */
+ _debug("retry");
+ netfs_retry_writes(wreq);
+ goto out;
+}
+
+/*
+ * Perform the collection of subrequests, folios and encryption buffers.
+ */
+void netfs_write_collection_worker(struct work_struct *work)
+{
+ struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
+ struct netfs_inode *ictx = netfs_inode(wreq->inode);
+ size_t transferred;
+ int s;
+
+ _enter("R=%x", wreq->debug_id);
+
+ netfs_see_request(wreq, netfs_rreq_trace_see_work);
+ if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
+ return;
+ }
+
+ netfs_collect_write_results(wreq);
+
+ /* We're done when the app thread has finished posting subreqs and all
+ * the queues in all the streams are empty.
+ */
+ if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) {
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
+ return;
+ }
+ smp_rmb(); /* Read ALL_QUEUED before lists. */
+
+ transferred = LONG_MAX;
+ for (s = 0; s < NR_IO_STREAMS; s++) {
+ struct netfs_io_stream *stream = &wreq->io_streams[s];
+ if (!stream->active)
+ continue;
+ if (!list_empty(&stream->subrequests)) {
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_work);
+ return;
+ }
+ if (stream->transferred < transferred)
+ transferred = stream->transferred;
+ }
+
+ /* Okay, declare that all I/O is complete. */
+ wreq->transferred = transferred;
+ trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
+
+ if (wreq->io_streams[1].active &&
+ wreq->io_streams[1].failed) {
+ /* Cache write failure doesn't prevent writeback completion
+ * unless we're in disconnected mode.
+ */
+ ictx->ops->invalidate_cache(wreq);
+ }
+
+ if (wreq->cleanup)
+ wreq->cleanup(wreq);
+
+ if (wreq->origin == NETFS_DIO_WRITE &&
+ wreq->mapping->nrpages) {
+ /* mmap may have got underfoot and we may now have folios
+ * locally covering the region we just wrote. Attempt to
+ * discard the folios, but leave in place any that have been
+ * modified locally. ->write_iter() is prevented from
+ * interfering by the DIO counter.
+ */
+ pgoff_t first = wreq->start >> PAGE_SHIFT;
+ pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+ invalidate_inode_pages2_range(wreq->mapping, first, last);
+ }
+
+ if (wreq->origin == NETFS_DIO_WRITE)
+ inode_dio_end(wreq->inode);
+
+ _debug("finished");
+ trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
+ clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
+ wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
+
+ if (wreq->iocb) {
+ wreq->iocb->ki_pos += wreq->transferred;
+ if (wreq->iocb->ki_complete)
+ wreq->iocb->ki_complete(
+ wreq->iocb, wreq->error ? wreq->error : wreq->transferred);
+ wreq->iocb = VFS_PTR_POISON;
+ }
+
+ netfs_clear_subrequests(wreq, false);
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_work_complete);
+}
+
+/*
+ * Wake the collection work item.
+ */
+void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
+{
+ if (!work_pending(&wreq->work)) {
+ netfs_get_request(wreq, netfs_rreq_trace_get_work);
+ if (!queue_work(system_unbound_wq, &wreq->work))
+ netfs_put_request(wreq, was_async, netfs_rreq_trace_put_work_nq);
+ }
+}
+
+/**
+ * netfs_write_subrequest_terminated - Note the termination of a write operation.
+ * @_op: The I/O subrequest that has terminated.
+ * @transferred_or_error: The amount of data transferred or an error code.
+ * @was_async: The termination was asynchronous
+ *
+ * This tells the library that a contributory write I/O operation has
+ * terminated, one way or another, and that it should collect the results.
+ *
+ * The caller indicates in @transferred_or_error the outcome of the operation,
+ * supplying a positive value to indicate the number of bytes transferred or a
+ * negative error code. The library will look after reissuing I/O operations
+ * as appropriate and writing downloaded data to the cache.
+ *
+ * If @was_async is true, the caller might be running in softirq or interrupt
+ * context and we can't sleep.
+ *
+ * When this is called, ownership of the subrequest is transferred back to the
+ * library, along with a ref.
+ *
+ * Note that @_op is a void* so that the function can be passed to
+ * kiocb::term_func without the need for a casting wrapper.
+ */
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct netfs_io_subrequest *subreq = _op;
+ struct netfs_io_request *wreq = subreq->rreq;
+ struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
+
+ _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+
+ switch (subreq->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload_done);
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write_done);
+ break;
+ case NETFS_INVALID_WRITE:
+ break;
+ default:
+ BUG();
+ }
+
+ if (IS_ERR_VALUE(transferred_or_error)) {
+ subreq->error = transferred_or_error;
+ if (subreq->error == -EAGAIN)
+ set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ else
+ set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ trace_netfs_failure(wreq, subreq, transferred_or_error, netfs_fail_write);
+
+ switch (subreq->source) {
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write_failed);
+ break;
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload_failed);
+ break;
+ default:
+ break;
+ }
+ trace_netfs_rreq(wreq, netfs_rreq_trace_set_pause);
+ set_bit(NETFS_RREQ_PAUSE, &wreq->flags);
+ } else {
+ if (WARN(transferred_or_error > subreq->len - subreq->transferred,
+ "Subreq excess write: R=%x[%x] %zd > %zu - %zu",
+ wreq->debug_id, subreq->debug_index,
+ transferred_or_error, subreq->len, subreq->transferred))
+ transferred_or_error = subreq->len - subreq->transferred;
+
+ subreq->error = 0;
+ subreq->transferred += transferred_or_error;
+
+ if (subreq->transferred < subreq->len)
+ set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+ clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS);
+
+ /* If this subrequest is at the head of the queue, wake up the
+ * collector; netfs_wake_write_collector() takes its own ref on the
+ * request if it queues the work.
+ */
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests))
+ netfs_wake_write_collector(wreq, was_async);
+
+ netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+EXPORT_SYMBOL(netfs_write_subrequest_terminated);
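+
+/* As an illustrative sketch only (not part of the netfs API), a
+ * filesystem's ->issue_write() handler might complete a subrequest like
+ * this, where my_fs_send() is a hypothetical transport helper:
+ *
+ *	static void my_fs_issue_write(struct netfs_io_subrequest *subreq)
+ *	{
+ *		ssize_t ret = my_fs_send(subreq->rreq->netfs_priv,
+ *					 &subreq->io_iter, subreq->start);
+ *
+ *		netfs_write_subrequest_terminated(subreq, ret, false);
+ *	}
+ *
+ * A negative return is recorded as the subrequest error and pauses the
+ * request; a short positive count leaves the subrequest marked for retry.
+ */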
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
new file mode 100644
index 000000000000..e190043bc0da
--- /dev/null
+++ b/fs/netfs/write_issue.c
@@ -0,0 +1,684 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Network filesystem high-level (buffered) writeback.
+ *
+ * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ *
+ * To support network filesystems with local caching, we manage a situation
+ * that can be envisioned as follows:
+ *
+ * +---+---+-----+-----+---+----------+
+ * Folios: | | | | | | |
+ * +---+---+-----+-----+---+----------+
+ *
+ * +------+------+ +----+----+
+ * Upload: | | |.....| | |
+ * (Stream 0) +------+------+ +----+----+
+ *
+ * +------+------+------+------+------+
+ * Cache: | | | | | |
+ * (Stream 1) +------+------+------+------+------+
+ *
+ * Where we have a sequence of folios of varying sizes that we need to overlay
+ * with multiple parallel streams of I/O requests, where the I/O requests in a
+ * stream may also be of various sizes (in cifs, for example, the sizes are
+ * negotiated with the server; in something like ceph, they may represent the
+ * sizes of storage objects).
+ *
+ * The sequence in each stream may contain gaps and noncontiguous subrequests
+ * may be glued together into single vectored write RPCs.
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include "internal.h"
+
+/*
+ * Kill all dirty folios in the event of an unrecoverable error, starting with
+ * a locked folio we've already obtained from writeback_iter().
+ */
+static void netfs_kill_dirty_pages(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct folio *folio)
+{
+ int error = 0;
+
+ do {
+ enum netfs_folio_trace why = netfs_folio_trace_kill;
+ struct netfs_group *group = NULL;
+ struct netfs_folio *finfo = NULL;
+ void *priv;
+
+ priv = folio_detach_private(folio);
+ if (priv) {
+ finfo = __netfs_folio_info(priv);
+ if (finfo) {
+ /* Kill folio from streaming write. */
+ group = finfo->netfs_group;
+ why = netfs_folio_trace_kill_s;
+ } else {
+ group = priv;
+ if (group == NETFS_FOLIO_COPY_TO_CACHE) {
+ /* Kill copy-to-cache folio */
+ why = netfs_folio_trace_kill_cc;
+ group = NULL;
+ } else {
+ /* Kill folio with group */
+ why = netfs_folio_trace_kill_g;
+ }
+ }
+ }
+
+ trace_netfs_folio(folio, why);
+
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+ folio_end_writeback(folio);
+
+ netfs_put_group(group);
+ kfree(finfo);
+
+ } while ((folio = writeback_iter(mapping, wbc, folio, &error)));
+}
+
+/*
+ * Create a write request and set it up appropriately for the origin type.
+ */
+struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+ struct file *file,
+ loff_t start,
+ enum netfs_io_origin origin)
+{
+ struct netfs_io_request *wreq;
+ struct netfs_inode *ictx;
+
+ wreq = netfs_alloc_request(mapping, file, start, 0, origin);
+ if (IS_ERR(wreq))
+ return wreq;
+
+ _enter("R=%x", wreq->debug_id);
+
+ ictx = netfs_inode(wreq->inode);
+ if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
+ fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
+
+ wreq->contiguity = wreq->start;
+ wreq->cleaned_to = wreq->start;
+ INIT_WORK(&wreq->work, netfs_write_collection_worker);
+
+ wreq->io_streams[0].stream_nr = 0;
+ wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER;
+ wreq->io_streams[0].prepare_write = ictx->ops->prepare_write;
+ wreq->io_streams[0].issue_write = ictx->ops->issue_write;
+ wreq->io_streams[0].collected_to = start;
+ wreq->io_streams[0].transferred = LONG_MAX;
+
+ wreq->io_streams[1].stream_nr = 1;
+ wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE;
+ wreq->io_streams[1].collected_to = start;
+ wreq->io_streams[1].transferred = LONG_MAX;
+ if (fscache_resources_valid(&wreq->cache_resources)) {
+ wreq->io_streams[1].avail = true;
+ wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq;
+ wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write;
+ }
+
+ return wreq;
+}
+
+/**
+ * netfs_prepare_write_failed - Note write preparation failed
+ * @subreq: The subrequest to mark
+ *
+ * Mark a subrequest to note that preparation for write failed.
+ */
+void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq)
+{
+ __set_bit(NETFS_SREQ_FAILED, &subreq->flags);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prep_failed);
+}
+EXPORT_SYMBOL(netfs_prepare_write_failed);
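+
+/* An illustrative sketch only: a filesystem's ->prepare_write() hook might
+ * clamp the subrequest and use the helper above on failure, where
+ * my_fs_max_io() is a hypothetical helper returning the transport's I/O
+ * size limit:
+ *
+ *	static void my_fs_prepare_write(struct netfs_io_subrequest *subreq)
+ *	{
+ *		size_t max = my_fs_max_io(subreq->rreq->netfs_priv);
+ *
+ *		if (!max)
+ *			return netfs_prepare_write_failed(subreq);
+ *		subreq->max_len = min_t(unsigned long, subreq->max_len, max);
+ *	}
+ */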
+
+/*
+ * Prepare a write subrequest. We need to allocate a new subrequest
+ * if we don't have one.
+ */
+static void netfs_prepare_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start)
+{
+ struct netfs_io_subrequest *subreq;
+
+ subreq = netfs_alloc_subrequest(wreq);
+ subreq->source = stream->source;
+ subreq->start = start;
+ subreq->max_len = ULONG_MAX;
+ subreq->max_nr_segs = INT_MAX;
+ subreq->stream_nr = stream->stream_nr;
+
+ _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
+
+ trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
+ refcount_read(&subreq->ref),
+ netfs_sreq_trace_new);
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+
+ switch (stream->source) {
+ case NETFS_UPLOAD_TO_SERVER:
+ netfs_stat(&netfs_n_wh_upload);
+ subreq->max_len = wreq->wsize;
+ break;
+ case NETFS_WRITE_TO_CACHE:
+ netfs_stat(&netfs_n_wh_write);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ if (stream->prepare_write)
+ stream->prepare_write(subreq);
+
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+
+ /* We add to the end of the list whilst the collector may be walking
+ * the list. The collector only walks forwards and uses the lock to
+ * remove entries from the front.
+ */
+ spin_lock(&wreq->lock);
+ list_add_tail(&subreq->rreq_link, &stream->subrequests);
+ if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
+ stream->front = subreq;
+ if (!stream->active) {
+ stream->collected_to = stream->front->start;
+ /* Write list pointers before active flag */
+ smp_store_release(&stream->active, true);
+ }
+ }
+
+ spin_unlock(&wreq->lock);
+
+ stream->construct = subreq;
+}
+
+/*
+ * Set the I/O iterator for the filesystem/cache to use and dispatch the I/O
+ * operation. The operation may be asynchronous and should call
+ * netfs_write_subrequest_terminated() when complete.
+ */
+static void netfs_do_issue_write(struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *wreq = subreq->rreq;
+
+ _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
+
+ if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
+ return netfs_write_subrequest_terminated(subreq, subreq->error, false);
+
+ // TODO: Use encrypted buffer
+ if (test_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags)) {
+ subreq->io_iter = wreq->io_iter;
+ iov_iter_advance(&subreq->io_iter,
+ subreq->start + subreq->transferred - wreq->start);
+ iov_iter_truncate(&subreq->io_iter,
+ subreq->len - subreq->transferred);
+ } else {
+ iov_iter_xarray(&subreq->io_iter, ITER_SOURCE, &wreq->mapping->i_pages,
+ subreq->start + subreq->transferred,
+ subreq->len - subreq->transferred);
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ stream->issue_write(subreq);
+}
+
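+/*
+ * Re-mark a subrequest as being in progress and push it back through the
+ * issuer for a retry.
+ */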
+void netfs_reissue_write(struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq)
+{
+ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
+ netfs_do_issue_write(stream, subreq);
+}
+
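+/*
+ * Dispatch the subrequest currently under construction on a stream, if
+ * there is one, extending the request's submitted region to cover it.
+ */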
+static void netfs_issue_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream)
+{
+ struct netfs_io_subrequest *subreq = stream->construct;
+
+ if (!subreq)
+ return;
+ stream->construct = NULL;
+
+ if (subreq->start + subreq->len > wreq->start + wreq->submitted)
+ wreq->len = wreq->submitted = subreq->start + subreq->len - wreq->start;
+ netfs_do_issue_write(stream, subreq);
+}
+
+/*
+ * Add data to the write subrequest, dispatching each as we fill it up or if it
+ * is discontiguous with the previous. We only fill one part at a time so that
+ * we can avoid overrunning the credits obtained (cifs) and try to parallelise
+ * content-crypto preparation with network writes.
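+ *
+ * Returns the number of bytes added to the subrequest under construction
+ * (the caller loops on this, advancing by the return value), or, if the
+ * stream is unavailable, the whole of len so that the caller skips over
+ * the data.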
+ */
+int netfs_advance_write(struct netfs_io_request *wreq,
+ struct netfs_io_stream *stream,
+ loff_t start, size_t len, bool to_eof)
+{
+ struct netfs_io_subrequest *subreq = stream->construct;
+ size_t part;
+
+ if (!stream->avail) {
+ _leave("no write");
+ return len;
+ }
+
+ _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
+
+ if (subreq && start != subreq->start + subreq->len) {
+ netfs_issue_write(wreq, stream);
+ subreq = NULL;
+ }
+
+ if (!stream->construct)
+ netfs_prepare_write(wreq, stream, start);
+ subreq = stream->construct;
+
+ part = min(subreq->max_len - subreq->len, len);
+ _debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
+ subreq->len += part;
+ subreq->nr_segs++;
+
+ if (subreq->len >= subreq->max_len ||
+ subreq->nr_segs >= subreq->max_nr_segs ||
+ to_eof) {
+ netfs_issue_write(wreq, stream);
+ subreq = NULL;
+ }
+
+ return part;
+}
+
+/*
+ * Write some of a pending folio data back to the server.
+ */
+static int netfs_write_folio(struct netfs_io_request *wreq,
+ struct writeback_control *wbc,
+ struct folio *folio)
+{
+ struct netfs_io_stream *upload = &wreq->io_streams[0];
+ struct netfs_io_stream *cache = &wreq->io_streams[1];
+ struct netfs_io_stream *stream;
+ struct netfs_group *fgroup; /* TODO: Use this with ceph */
+ struct netfs_folio *finfo;
+ size_t fsize = folio_size(folio), flen = fsize, foff = 0;
+ loff_t fpos = folio_pos(folio), i_size;
+ bool to_eof = false, streamw = false;
+ bool debug = false;
+
+ _enter("");
+
+ /* netfs_perform_write() may shift i_size around the page or from out
+ * of the page to beyond it, but cannot move i_size into or through the
+ * page since we have it locked.
+ */
+ i_size = i_size_read(wreq->inode);
+
+ if (fpos >= i_size) {
+ /* mmap beyond eof. */
+ _debug("beyond eof");
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+ wreq->nr_group_rel += netfs_folio_written_back(folio);
+ netfs_put_group_many(wreq->group, wreq->nr_group_rel);
+ wreq->nr_group_rel = 0;
+ return 0;
+ }
+
+ if (fpos + fsize > wreq->i_size)
+ wreq->i_size = i_size;
+
+ fgroup = netfs_folio_group(folio);
+ finfo = netfs_folio_info(folio);
+ if (finfo) {
+ foff = finfo->dirty_offset;
+ flen = foff + finfo->dirty_len;
+ streamw = true;
+ }
+
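+ /* Work out whether the write extends to the EOF position. Writethrough
+ * never writes beyond i_size; writeback zero-pads the tail of a folio
+ * that crosses EOF unless this is a streaming write, in which case the
+ * dirty region is already bounded.
+ */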
+ if (wreq->origin == NETFS_WRITETHROUGH) {
+ to_eof = false;
+ if (flen > i_size - fpos)
+ flen = i_size - fpos;
+ } else if (flen > i_size - fpos) {
+ flen = i_size - fpos;
+ if (!streamw)
+ folio_zero_segment(folio, flen, fsize);
+ to_eof = true;
+ } else if (flen == i_size - fpos) {
+ to_eof = true;
+ }
+ flen -= foff;
+
+ _debug("folio %zx %zx %zx", foff, flen, fsize);
+
+ /* Deal with discontinuities in the stream of dirty pages. These can
+ * arise from a number of sources:
+ *
+ * (1) Intervening non-dirty pages from random-access writes, multiple
+ * flushers writing back different parts simultaneously and manual
+ * syncing.
+ *
+ * (2) Partially-written pages from write-streaming.
+ *
+ * (3) Pages that belong to a different write-back group (eg. Ceph
+ * snapshots).
+ *
+ * (4) Actually-clean pages that were marked for write to the cache
+ * when they were read. Note that these appear as a special
+ * write-back group.
+ */
+ if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
+ netfs_issue_write(wreq, upload);
+ } else if (fgroup != wreq->group) {
+ /* We can't write this page to the server yet. */
+ kdebug("wrong group");
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
+ netfs_issue_write(wreq, upload);
+ netfs_issue_write(wreq, cache);
+ return 0;
+ }
+
+ if (foff > 0)
+ netfs_issue_write(wreq, upload);
+ if (streamw)
+ netfs_issue_write(wreq, cache);
+
+ /* Flip the page to the writeback state and unlock. If we're called
+ * from write-through, then the page has already been put into the wb
+ * state.
+ */
+ if (wreq->origin == NETFS_WRITEBACK)
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+
+ if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
+ if (!fscache_resources_valid(&wreq->cache_resources)) {
+ trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
+ netfs_issue_write(wreq, upload);
+ netfs_folio_written_back(folio);
+ return 0;
+ }
+ trace_netfs_folio(folio, netfs_folio_trace_store_copy);
+ } else if (!upload->construct) {
+ trace_netfs_folio(folio, netfs_folio_trace_store);
+ } else {
+ trace_netfs_folio(folio, netfs_folio_trace_store_plus);
+ }
+
+ /* Move the submission point forward to allow for write-streaming data
+ * not starting at the front of the page. We don't do write-streaming
+ * with the cache as the cache requires DIO alignment.
+ *
+ * Also skip uploading for data that's been read and just needs copying
+ * to the cache.
+ */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ stream->submit_max_len = fsize;
+ stream->submit_off = foff;
+ stream->submit_len = flen;
+ if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
+ (stream->source == NETFS_UPLOAD_TO_SERVER &&
+ fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
+ stream->submit_off = UINT_MAX;
+ stream->submit_len = 0;
+ stream->submit_max_len = 0;
+ }
+ }
+
+ /* Attach the folio to one or more subrequests. For a big folio, we
+ * could end up with thousands of subrequests if the wsize is small -
+ * but we might need to wait during the creation of subrequests for
+ * network resources (eg. SMB credits).
+ */
+ for (;;) {
+ ssize_t part;
+ size_t lowest_off = ULONG_MAX;
+ int choose_s = -1;
+
+ /* Always add to the lowest-submitted stream first. */
+ for (int s = 0; s < NR_IO_STREAMS; s++) {
+ stream = &wreq->io_streams[s];
+ if (stream->submit_len > 0 &&
+ stream->submit_off < lowest_off) {
+ lowest_off = stream->submit_off;
+ choose_s = s;
+ }
+ }
+
+ if (choose_s < 0)
+ break;
+ stream = &wreq->io_streams[choose_s];
+
+ part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
+ stream->submit_len, to_eof);
+ atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
+ stream->submit_off += part;
+ stream->submit_max_len -= part;
+ if (part > stream->submit_len)
+ stream->submit_len = 0;
+ else
+ stream->submit_len -= part;
+ if (part > 0)
+ debug = true;
+ }
+
+ atomic64_set(&wreq->issued_to, fpos + fsize);
+
+ if (!debug)
+ kdebug("R=%x: No submit", wreq->debug_id);
+
+ if (flen < fsize)
+ for (int s = 0; s < NR_IO_STREAMS; s++)
+ netfs_issue_write(wreq, &wreq->io_streams[s]);
+
+ _leave(" = 0");
+ return 0;
+}
+
+/*
+ * Write some of the pending data back to the server
+ */
+int netfs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct netfs_inode *ictx = netfs_inode(mapping->host);
+ struct netfs_io_request *wreq = NULL;
+ struct folio *folio;
+ int error = 0;
+
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ mutex_lock(&ictx->wb_lock);
+ else if (!mutex_trylock(&ictx->wb_lock))
+ return 0;
+
+ /* Need the first folio to be able to set up the op. */
+ folio = writeback_iter(mapping, wbc, NULL, &error);
+ if (!folio)
+ goto out;
+
+ wreq = netfs_create_write_req(mapping, NULL, folio_pos(folio), NETFS_WRITEBACK);
+ if (IS_ERR(wreq)) {
+ error = PTR_ERR(wreq);
+ goto couldnt_start;
+ }
+
+ trace_netfs_write(wreq, netfs_write_trace_writeback);
+ netfs_stat(&netfs_n_wh_writepages);
+
+ do {
+ _debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
+
+ /* It appears we don't have to handle cyclic writeback wrapping. */
+ WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
+
+ if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
+ unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
+ set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+ wreq->netfs_ops->begin_writeback(wreq);
+ }
+
+ error = netfs_write_folio(wreq, wbc, folio);
+ if (error < 0)
+ break;
+ } while ((folio = writeback_iter(mapping, wbc, folio, &error)));
+
+ for (int s = 0; s < NR_IO_STREAMS; s++)
+ netfs_issue_write(wreq, &wreq->io_streams[s]);
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+ mutex_unlock(&ictx->wb_lock);
+
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ _leave(" = %d", error);
+ return error;
+
+couldnt_start:
+ netfs_kill_dirty_pages(mapping, wbc, folio);
+out:
+ mutex_unlock(&ictx->wb_lock);
+ _leave(" = %d", error);
+ return error;
+}
+EXPORT_SYMBOL(netfs_writepages);
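+
+/* A filesystem would typically point its address_space_operations at this
+ * directly; a minimal sketch, assuming the other netfs address-space
+ * helpers (such as netfs_dirty_folio) are in use for the remaining methods:
+ *
+ *	const struct address_space_operations my_fs_aops = {
+ *		.writepages	= netfs_writepages,
+ *		.dirty_folio	= netfs_dirty_folio,
+ *	};
+ */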
+
+/*
+ * Begin a write operation for writing through the pagecache.
+ */
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
+{
+ struct netfs_io_request *wreq = NULL;
+ struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));
+
+ mutex_lock(&ictx->wb_lock);
+
+ wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp,
+ iocb->ki_pos, NETFS_WRITETHROUGH);
+ if (IS_ERR(wreq)) {
+ mutex_unlock(&ictx->wb_lock);
+ return wreq;
+ }
+
+ wreq->io_streams[0].avail = true;
+ trace_netfs_write(wreq, netfs_write_trace_writethrough);
+ return wreq;
+}
+
+/*
+ * Advance the state of the write operation used when writing through the
+ * pagecache. Data has been copied into the pagecache that we need to append
+ * to the request. If we've added more than wsize then we need to create a new
+ * subrequest.
+ */
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *folio, size_t copied, bool to_page_end,
+ struct folio **writethrough_cache)
+{
+ _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
+ wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
+
+ if (!*writethrough_cache) {
+ if (folio_test_dirty(folio))
+ /* Sigh. mmap. */
+ folio_clear_dirty_for_io(folio);
+
+ /* We can make multiple writes to the folio... */
+ folio_start_writeback(folio);
+ if (wreq->len == 0)
+ trace_netfs_folio(folio, netfs_folio_trace_wthru);
+ else
+ trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
+ *writethrough_cache = folio;
+ }
+
+ wreq->len += copied;
+ if (!to_page_end)
+ return 0;
+
+ *writethrough_cache = NULL;
+ return netfs_write_folio(wreq, wbc, folio);
+}
+
+/*
+ * End a write operation used when writing through the pagecache.
+ */
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *writethrough_cache)
+{
+ struct netfs_inode *ictx = netfs_inode(wreq->inode);
+ int ret;
+
+ _enter("R=%x", wreq->debug_id);
+
+ if (writethrough_cache)
+ netfs_write_folio(wreq, wbc, writethrough_cache);
+
+ netfs_issue_write(wreq, &wreq->io_streams[0]);
+ netfs_issue_write(wreq, &wreq->io_streams[1]);
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+ mutex_unlock(&ictx->wb_lock);
+
+ ret = wreq->error;
+ netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
+ return ret;
+}
+
+/*
+ * Write data to the server without going through the pagecache and without
+ * writing it to the local cache.
+ */
+int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
+{
+ struct netfs_io_stream *upload = &wreq->io_streams[0];
+ ssize_t part;
+ loff_t start = wreq->start;
+ int error = 0;
+
+ _enter("%zx", len);
+
+ if (wreq->origin == NETFS_DIO_WRITE)
+ inode_dio_begin(wreq->inode);
+
+ while (len) {
+ // TODO: Prepare content encryption
+
+ _debug("unbuffered %zx", len);
+ part = netfs_advance_write(wreq, upload, start, len, false);
+ start += part;
+ len -= part;
+ if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
+ trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
+ wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
+ }
+ if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
+ break;
+ }
+
+ netfs_issue_write(wreq, upload);
+
+ smp_wmb(); /* Write lists before ALL_QUEUED. */
+ set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+ if (list_empty(&upload->subrequests))
+ netfs_wake_write_collector(wreq, false);
+
+ _leave(" = %d", error);
+ return error;
+}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 407c6e15afe2..6bd127e6683d 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -433,7 +433,7 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset,
return;
/* Cancel any unstarted writes on this page */
nfs_wb_folio_cancel(inode, folio);
- folio_wait_fscache(folio);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
trace_nfs_invalidate_folio(inode, folio);
}
@@ -500,7 +500,7 @@ static int nfs_launder_folio(struct folio *folio)
dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n",
inode->i_ino, folio_pos(folio));
- folio_wait_fscache(folio);
+ folio_wait_private_2(folio); /* [DEPRECATED] */
ret = nfs_wb_folio(inode, folio);
trace_nfs_launder_folio_done(inode, folio, ret);
return ret;
@@ -593,8 +593,8 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
/* make sure the cache has finished storing the page */
- if (folio_test_fscache(folio) &&
- folio_wait_fscache_killable(folio) < 0) {
+ if (folio_test_private_2(folio) && /* [DEPRECATED] */
+ folio_wait_private_2_killable(folio) < 0) {
ret = VM_FAULT_RETRY;
goto out;
}
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index e3cb4923316b..fbed0027996f 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -81,6 +81,8 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
{
netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
+ /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+ __set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags);
}
extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
@@ -101,10 +103,10 @@ extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);
static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
{
- if (folio_test_fscache(folio)) {
+ if (folio_test_private_2(folio)) { /* [DEPRECATED] */
if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
- folio_wait_fscache(folio);
+ folio_wait_private_2(folio);
}
fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
return true;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c709c296ea9a..acef52ecb1bb 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2429,7 +2429,12 @@ static int nfs_net_init(struct net *net)
struct nfs_net *nn = net_generic(net, nfs_net_id);
nfs_clients_init(net);
- rpc_proc_register(net, &nn->rpcstats);
+
+ if (!rpc_proc_register(net, &nn->rpcstats)) {
+ nfs_clients_exit(net);
+ return -ENOMEM;
+ }
+
return nfs_fs_proc_net_init(net);
}
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index 5aa776b5a3e7..b17a9eb9b148 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -46,10 +46,7 @@ static inline void nfs_add_stats(const struct inode *inode,
nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
}
-static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
-{
- return alloc_percpu(struct nfs_iostats);
-}
+#define nfs_alloc_iostats() alloc_percpu(struct nfs_iostats)
static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
{
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5de85d725fb9..2329cbb0e446 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -2120,10 +2120,10 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
if (folio_test_private(src))
return -EBUSY;
- if (folio_test_fscache(src)) {
+ if (folio_test_private_2(src)) { /* [DEPRECATED] */
if (mode == MIGRATE_ASYNC)
return -EBUSY;
- folio_wait_fscache(src);
+ folio_wait_private_2(src);
}
return migrate_folio(mapping, dst, src, mode);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 7b641095a665..50b3135d07ac 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -334,21 +334,25 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
static int export_stats_init(struct export_stats *stats)
{
stats->start_time = ktime_get_seconds();
- return nfsd_percpu_counters_init(stats->counter, EXP_STATS_COUNTERS_NUM);
+ return percpu_counter_init_many(stats->counter, 0, GFP_KERNEL,
+ EXP_STATS_COUNTERS_NUM);
}
static void export_stats_reset(struct export_stats *stats)
{
- if (stats)
- nfsd_percpu_counters_reset(stats->counter,
- EXP_STATS_COUNTERS_NUM);
+ if (stats) {
+ int i;
+
+ for (i = 0; i < EXP_STATS_COUNTERS_NUM; i++)
+ percpu_counter_set(&stats->counter[i], 0);
+ }
}
static void export_stats_destroy(struct export_stats *stats)
{
if (stats)
- nfsd_percpu_counters_destroy(stats->counter,
- EXP_STATS_COUNTERS_NUM);
+ percpu_counter_destroy_many(stats->counter,
+ EXP_STATS_COUNTERS_NUM);
}
static void svc_export_put(struct kref *ref)
diff --git a/fs/nfsd/netlink.c b/fs/nfsd/netlink.c
index 0e1d635ec5f9..62d2586d9902 100644
--- a/fs/nfsd/netlink.c
+++ b/fs/nfsd/netlink.c
@@ -10,6 +10,36 @@
#include <uapi/linux/nfsd_netlink.h>
+/* Common nested types */
+const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1] = {
+ [NFSD_A_SOCK_ADDR] = { .type = NLA_BINARY, },
+ [NFSD_A_SOCK_TRANSPORT_NAME] = { .type = NLA_NUL_STRING, },
+};
+
+const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1] = {
+ [NFSD_A_VERSION_MAJOR] = { .type = NLA_U32, },
+ [NFSD_A_VERSION_MINOR] = { .type = NLA_U32, },
+ [NFSD_A_VERSION_ENABLED] = { .type = NLA_FLAG, },
+};
+
+/* NFSD_CMD_THREADS_SET - do */
+static const struct nla_policy nfsd_threads_set_nl_policy[NFSD_A_SERVER_SCOPE + 1] = {
+ [NFSD_A_SERVER_THREADS] = { .type = NLA_U32, },
+ [NFSD_A_SERVER_GRACETIME] = { .type = NLA_U32, },
+ [NFSD_A_SERVER_LEASETIME] = { .type = NLA_U32, },
+ [NFSD_A_SERVER_SCOPE] = { .type = NLA_NUL_STRING, },
+};
+
+/* NFSD_CMD_VERSION_SET - do */
+static const struct nla_policy nfsd_version_set_nl_policy[NFSD_A_SERVER_PROTO_VERSION + 1] = {
+ [NFSD_A_SERVER_PROTO_VERSION] = NLA_POLICY_NESTED(nfsd_version_nl_policy),
+};
+
+/* NFSD_CMD_LISTENER_SET - do */
+static const struct nla_policy nfsd_listener_set_nl_policy[NFSD_A_SERVER_SOCK_ADDR + 1] = {
+ [NFSD_A_SERVER_SOCK_ADDR] = NLA_POLICY_NESTED(nfsd_sock_nl_policy),
+};
+
/* Ops table for nfsd */
static const struct genl_split_ops nfsd_nl_ops[] = {
{
@@ -19,6 +49,42 @@ static const struct genl_split_ops nfsd_nl_ops[] = {
.done = nfsd_nl_rpc_status_get_done,
.flags = GENL_CMD_CAP_DUMP,
},
+ {
+ .cmd = NFSD_CMD_THREADS_SET,
+ .doit = nfsd_nl_threads_set_doit,
+ .policy = nfsd_threads_set_nl_policy,
+ .maxattr = NFSD_A_SERVER_SCOPE,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_THREADS_GET,
+ .doit = nfsd_nl_threads_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_VERSION_SET,
+ .doit = nfsd_nl_version_set_doit,
+ .policy = nfsd_version_set_nl_policy,
+ .maxattr = NFSD_A_SERVER_PROTO_VERSION,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_VERSION_GET,
+ .doit = nfsd_nl_version_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_LISTENER_SET,
+ .doit = nfsd_nl_listener_set_doit,
+ .policy = nfsd_listener_set_nl_policy,
+ .maxattr = NFSD_A_SERVER_SOCK_ADDR,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = NFSD_CMD_LISTENER_GET,
+ .doit = nfsd_nl_listener_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
};
struct genl_family nfsd_nl_family __ro_after_init = {
diff --git a/fs/nfsd/netlink.h b/fs/nfsd/netlink.h
index d83dd6bdee92..e3724637d64d 100644
--- a/fs/nfsd/netlink.h
+++ b/fs/nfsd/netlink.h
@@ -11,11 +11,21 @@
#include <uapi/linux/nfsd_netlink.h>
+/* Common nested types */
+extern const struct nla_policy nfsd_sock_nl_policy[NFSD_A_SOCK_TRANSPORT_NAME + 1];
+extern const struct nla_policy nfsd_version_nl_policy[NFSD_A_VERSION_ENABLED + 1];
+
int nfsd_nl_rpc_status_get_start(struct netlink_callback *cb);
int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb);
int nfsd_nl_rpc_status_get_dumpit(struct sk_buff *skb,
struct netlink_callback *cb);
+int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_version_get_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info);
+int nfsd_nl_listener_get_doit(struct sk_buff *skb, struct genl_info *info);
extern struct genl_family nfsd_nl_family;
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index d4be519b5734..14ec15656320 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -218,6 +218,7 @@ struct nfsd_net {
/* Simple check to find out if a given net was properly initialized */
#define nfsd_netns_ready(nn) ((nn)->sessionid_hashtbl)
+extern bool nfsd_support_version(int vers);
extern void nfsd_netns_free_versions(struct nfsd_net *nn);
extern unsigned int nfsd_net_id;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 87c9547989f6..d756f443fc44 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -978,20 +978,12 @@ static int max_cb_time(struct net *net)
return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ;
}
-static struct workqueue_struct *callback_wq;
-
static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
{
- trace_nfsd_cb_queue(cb->cb_clp, cb);
- return queue_delayed_work(callback_wq, &cb->cb_work, 0);
-}
+ struct nfs4_client *clp = cb->cb_clp;
-static void nfsd4_queue_cb_delayed(struct nfsd4_callback *cb,
- unsigned long msecs)
-{
- trace_nfsd_cb_queue(cb->cb_clp, cb);
- queue_delayed_work(callback_wq, &cb->cb_work,
- msecs_to_jiffies(msecs));
+ trace_nfsd_cb_queue(clp, cb);
+ return queue_work(clp->cl_callback_wq, &cb->cb_work);
}
static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
@@ -1161,7 +1153,7 @@ void nfsd4_probe_callback(struct nfs4_client *clp)
void nfsd4_probe_callback_sync(struct nfs4_client *clp)
{
nfsd4_probe_callback(clp);
- flush_workqueue(callback_wq);
+ flush_workqueue(clp->cl_callback_wq);
}
void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
@@ -1380,19 +1372,6 @@ static const struct rpc_call_ops nfsd4_cb_ops = {
.rpc_release = nfsd4_cb_release,
};
-int nfsd4_create_callback_queue(void)
-{
- callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
- if (!callback_wq)
- return -ENOMEM;
- return 0;
-}
-
-void nfsd4_destroy_callback_queue(void)
-{
- destroy_workqueue(callback_wq);
-}
-
/* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp)
{
@@ -1406,7 +1385,7 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp)
* client, destroy the rpc client, and stop:
*/
nfsd4_run_cb(&clp->cl_cb_null);
- flush_workqueue(callback_wq);
+ flush_workqueue(clp->cl_callback_wq);
nfsd41_cb_inflight_wait_complete(clp);
}
@@ -1428,9 +1407,9 @@ static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp)
/*
* Note there isn't a lot of locking in this code; instead we depend on
- * the fact that it is run from the callback_wq, which won't run two
- * work items at once. So, for example, callback_wq handles all access
- * of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
+ * the fact that it is run from clp->cl_callback_wq, which won't run two
+ * work items at once. So, for example, clp->cl_callback_wq handles all
+ * access of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
*/
static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
{
@@ -1490,7 +1469,7 @@ static void
nfsd4_run_cb_work(struct work_struct *work)
{
struct nfsd4_callback *cb =
- container_of(work, struct nfsd4_callback, cb_work.work);
+ container_of(work, struct nfsd4_callback, cb_work);
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
int flags;
@@ -1502,16 +1481,8 @@ nfsd4_run_cb_work(struct work_struct *work)
clnt = clp->cl_cb_client;
if (!clnt) {
- if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
- nfsd41_destroy_cb(cb);
- else {
- /*
- * XXX: Ideally, we could wait for the client to
- * reconnect, but I haven't figured out how
- * to do that yet.
- */
- nfsd4_queue_cb_delayed(cb, 25);
- }
+ /* Callback channel broken, or client killed; give up: */
+ nfsd41_destroy_cb(cb);
return;
}
@@ -1544,7 +1515,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_msg.rpc_argp = cb;
cb->cb_msg.rpc_resp = cb;
cb->cb_ops = ops;
- INIT_DELAYED_WORK(&cb->cb_work, nfsd4_run_cb_work);
+ INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
cb->cb_status = 0;
cb->cb_need_restart = false;
cb->cb_holds_slot = false;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 2927b1263f08..46bd20fe5c0f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1737,7 +1737,7 @@ static void cleanup_async_copy(struct nfsd4_copy *copy)
nfs4_put_copy(copy);
}
-static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
+static void nfsd4_send_cb_offload(struct nfsd4_copy *copy)
{
struct nfsd4_cb_offload *cbo;
@@ -1747,12 +1747,12 @@ static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
memcpy(&cbo->co_res, &copy->cp_res, sizeof(copy->cp_res));
memcpy(&cbo->co_fh, &copy->fh, sizeof(copy->fh));
- cbo->co_nfserr = nfserr;
+ cbo->co_nfserr = copy->nfserr;
nfsd4_init_cb(&cbo->co_cb, copy->cp_clp, &nfsd4_cb_offload_ops,
NFSPROC4_CLNT_CB_OFFLOAD);
trace_nfsd_cb_offload(copy->cp_clp, &cbo->co_res.cb_stateid,
- &cbo->co_fh, copy->cp_count, nfserr);
+ &cbo->co_fh, copy->cp_count, copy->nfserr);
nfsd4_run_cb(&cbo->co_cb);
}
@@ -1766,7 +1766,6 @@ static void nfsd4_send_cb_offload(struct nfsd4_copy *copy, __be32 nfserr)
static int nfsd4_do_async_copy(void *data)
{
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
- __be32 nfserr;
trace_nfsd_copy_do_async(copy);
if (nfsd4_ssc_is_inter(copy)) {
@@ -1777,24 +1776,25 @@ static int nfsd4_do_async_copy(void *data)
if (IS_ERR(filp)) {
switch (PTR_ERR(filp)) {
case -EBADF:
- nfserr = nfserr_wrong_type;
+ copy->nfserr = nfserr_wrong_type;
break;
default:
- nfserr = nfserr_offload_denied;
+ copy->nfserr = nfserr_offload_denied;
}
/* ss_mnt will be unmounted by the laundromat */
goto do_callback;
}
- nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
- false);
+ copy->nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
+ false);
nfsd4_cleanup_inter_ssc(copy->ss_nsui, filp, copy->nf_dst);
} else {
- nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
- copy->nf_dst->nf_file, false);
+ copy->nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
+ copy->nf_dst->nf_file, false);
}
do_callback:
- nfsd4_send_cb_offload(copy, nfserr);
+ set_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags);
+ nfsd4_send_cb_offload(copy);
cleanup_async_copy(copy);
return 0;
}
@@ -1807,6 +1807,13 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd4_copy *async_copy = NULL;
+ /*
+ * Currently, async COPY is not reliable. Force all COPY
+ * requests to be synchronous to avoid client application
+ * hangs waiting for COPY completion.
+ */
+ nfsd4_copy_set_sync(copy, true);
+
copy->cp_clp = cstate->clp;
if (nfsd4_ssc_is_inter(copy)) {
trace_nfsd_copy_inter(copy);
@@ -2003,11 +2010,16 @@ nfsd4_offload_status(struct svc_rqst *rqstp,
struct nfsd4_copy *copy;
struct nfs4_client *clp = cstate->clp;
+ os->completed = false;
spin_lock(&clp->async_lock);
copy = find_async_copy_locked(clp, &os->stateid);
- if (copy)
+ if (copy) {
os->count = copy->cp_res.wr_bytes_written;
- else
+ if (test_bit(NFSD4_COPY_F_COMPLETED, &copy->cp_flags)) {
+ os->completed = true;
+ os->status = copy->nfserr;
+ }
+ } else
status = nfserr_bad_stateid;
spin_unlock(&clp->async_lock);
@@ -2154,6 +2166,29 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return status == nfserr_same ? nfs_ok : status;
}
+static __be32
+nfsd4_get_dir_delegation(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
+
+ /*
+ * RFC 8881, section 18.39.3 says:
+ *
+ * "The server may refuse to grant the delegation. In that case, the
+ * server will return NFS4ERR_DIRDELEG_UNAVAIL."
+ *
+ * This is sub-optimal, since it means that the server would need to
+ * abort compound processing just because the delegation wasn't
+ * available. RFC8881bis should change this to allow the server to
+ * return NFS4_OK with a non-fatal status of GDD4_UNAVAIL in this
+ * situation.
+ */
+ gdd->gddrnf_status = GDD4_UNAVAIL;
+ return nfs_ok;
+}
+
#ifdef CONFIG_NFSD_PNFS
static const struct nfsd4_layout_ops *
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
@@ -3082,6 +3117,18 @@ static u32 nfsd4_copy_notify_rsize(const struct svc_rqst *rqstp,
* sizeof(__be32);
}
+static u32 nfsd4_get_dir_delegation_rsize(const struct svc_rqst *rqstp,
+ const struct nfsd4_op *op)
+{
+ return (op_encode_hdr_size +
+ 1 /* gddr_status */ +
+ op_encode_verifier_maxsz +
+ op_encode_stateid_maxsz +
+ 2 /* gddr_notification */ +
+ 2 /* gddr_child_attributes */ +
+ 2 /* gddr_dir_attributes */);
+}
+
#ifdef CONFIG_NFSD_PNFS
static u32 nfsd4_getdeviceinfo_rsize(const struct svc_rqst *rqstp,
const struct nfsd4_op *op)
@@ -3470,6 +3517,12 @@ static const struct nfsd4_operation nfsd4_ops[] = {
.op_get_currentstateid = nfsd4_get_freestateid,
.op_rsize_bop = nfsd4_only_status_rsize,
},
+ [OP_GET_DIR_DELEGATION] = {
+ .op_func = nfsd4_get_dir_delegation,
+ .op_flags = OP_MODIFIES_SOMETHING,
+ .op_name = "OP_GET_DIR_DELEGATION",
+ .op_rsize_bop = nfsd4_get_dir_delegation_rsize,
+ },
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = {
.op_func = nfsd4_getdeviceinfo,
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1a93c7fcf76c..a20c2c9d7d45 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -541,7 +541,7 @@ same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
}
static struct nfs4_openowner *
-find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
+find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
struct nfs4_client *clp)
{
struct nfs4_stateowner *so;
@@ -558,18 +558,6 @@ find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
return NULL;
}
-static struct nfs4_openowner *
-find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
- struct nfs4_client *clp)
-{
- struct nfs4_openowner *oo;
-
- spin_lock(&clp->cl_lock);
- oo = find_openstateowner_str_locked(hashval, open, clp);
- spin_unlock(&clp->cl_lock);
- return oo;
-}
-
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
@@ -1409,11 +1397,16 @@ static void
recalculate_deny_mode(struct nfs4_file *fp)
{
struct nfs4_ol_stateid *stp;
+ u32 old_deny;
spin_lock(&fp->fi_lock);
+ old_deny = fp->fi_share_deny;
fp->fi_share_deny = 0;
- list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
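+ /* The recalculated deny mode is always a subset of the old one, so
+ * once it has grown back to the old value it cannot change further
+ * and we can stop early.
+ */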
+ list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
+ if (fp->fi_share_deny == old_deny)
+ break;
+ }
spin_unlock(&fp->fi_lock);
}
@@ -2245,6 +2238,10 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name,
GFP_KERNEL);
if (!clp->cl_ownerstr_hashtbl)
goto err_no_hashtbl;
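+ /* Each client gets its own ordered workqueue for backchannel work so
+ * that one client's callbacks can't hold up another's.
+ */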
+ clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0);
+ if (!clp->cl_callback_wq)
+ goto err_no_callback_wq;
+
for (i = 0; i < OWNER_HASH_SIZE; i++)
INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
INIT_LIST_HEAD(&clp->cl_sessions);
@@ -2267,6 +2264,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name,
spin_lock_init(&clp->cl_lock);
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
+err_no_callback_wq:
+ kfree(clp->cl_ownerstr_hashtbl);
err_no_hashtbl:
kfree(clp->cl_name.data);
err_no_name:
@@ -2280,6 +2279,7 @@ static void __free_client(struct kref *k)
struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
free_svc_cred(&clp->cl_cred);
+ destroy_workqueue(clp->cl_callback_wq);
kfree(clp->cl_ownerstr_hashtbl);
kfree(clp->cl_name.data);
kfree(clp->cl_nii_domain.data);
@@ -2352,7 +2352,11 @@ unhash_client(struct nfs4_client *clp)
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
- if (atomic_read(&clp->cl_rpc_users))
+ int users = atomic_read(&clp->cl_rpc_users);
+
+ trace_nfsd_mark_client_expired(clp, users);
+
+ if (users)
return nfserr_jukebox;
unhash_client_locked(clp);
return nfs_ok;
@@ -3042,12 +3046,9 @@ static void
nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
- struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
- spin_lock(&nn->client_lock);
clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
- put_client_renew_locked(clp);
- spin_unlock(&nn->client_lock);
+ drop_client(clp);
}
static int
@@ -3644,12 +3645,8 @@ out_nolock:
return status;
}
-static __be32
-check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
+static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse)
{
- dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
- slot_seqid);
-
/* The slot is in use, and no response has been sent. */
if (slot_inuse) {
if (seqid == slot_seqid)
@@ -3826,20 +3823,28 @@ nfsd4_create_session(struct svc_rqst *rqstp,
}
/* RFC 8881 Section 18.36.4 Phase 2: Sequence ID processing. */
- if (conf)
+ if (conf) {
cs_slot = &conf->cl_cs_slot;
- else
+ trace_nfsd_slot_seqid_conf(conf, cr_ses);
+ } else {
cs_slot = &unconf->cl_cs_slot;
+ trace_nfsd_slot_seqid_unconf(unconf, cr_ses);
+ }
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
- if (status) {
- if (status == nfserr_replay_cache) {
- status = nfsd4_replay_create_session(cr_ses, cs_slot);
- goto out_free_conn;
- }
+ switch (status) {
+ case nfs_ok:
+ cs_slot->sl_seqid++;
+ cr_ses->seqid = cs_slot->sl_seqid;
+ break;
+ case nfserr_replay_cache:
+ status = nfsd4_replay_create_session(cr_ses, cs_slot);
+ fallthrough;
+ case nfserr_jukebox:
+ /* The server MUST NOT cache NFS4ERR_DELAY */
+ goto out_free_conn;
+ default:
goto out_cache_error;
}
- cs_slot->sl_seqid++;
- cr_ses->seqid = cs_slot->sl_seqid;
/* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. */
if (conf) {
@@ -3859,10 +3864,8 @@ nfsd4_create_session(struct svc_rqst *rqstp,
old = find_confirmed_client_by_name(&unconf->cl_name, nn);
if (old) {
status = mark_client_expired_locked(old);
- if (status) {
- old = NULL;
- goto out_cache_error;
- }
+ if (status)
+ goto out_expired_error;
trace_nfsd_clid_replaced(&old->cl_clientid);
}
move_to_confirmed(unconf);
@@ -3894,6 +3897,17 @@ nfsd4_create_session(struct svc_rqst *rqstp,
expire_client(old);
return status;
+out_expired_error:
+ old = NULL;
+ /*
+ * Revert the slot seq_nr change so the server will process
+ * the client's resend instead of returning a cached response.
+ */
+ if (status == nfserr_jukebox) {
+ cs_slot->sl_seqid--;
+ cr_ses->seqid = cs_slot->sl_seqid;
+ goto out_free_conn;
+ }
out_cache_error:
nfsd4_cache_create_session(cr_ses, cs_slot, status);
out_free_conn:
@@ -4210,6 +4224,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* sr_highest_slotid and the sr_target_slot id to maxslots */
seq->maxslots = session->se_fchannel.maxreqs;
+ trace_nfsd_slot_seqid_sequence(clp, seq, slot);
status = check_slot_seqid(seq->seqid, slot->sl_seqid,
slot->sl_flags & NFSD4_SLOT_INUSE);
if (status == nfserr_replay_cache) {
@@ -4651,21 +4666,32 @@ nfsd4_init_leases_net(struct nfsd_net *nn)
atomic_set(&nn->nfsd_courtesy_clients, 0);
}
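+/*
+ * States for nfs4_replay::rp_locked. This replaces rp_mutex: waiters take
+ * the "lock" with wait_var_event() plus an atomic cmpxchg, and RP_UNHASHED
+ * tells any remaining waiters that the owner is being destroyed and they
+ * should retry their lookup.
+ */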
+enum rp_lock {
+ RP_UNLOCKED,
+ RP_LOCKED,
+ RP_UNHASHED,
+};
+
static void init_nfs4_replay(struct nfs4_replay *rp)
{
rp->rp_status = nfserr_serverfault;
rp->rp_buflen = 0;
rp->rp_buf = rp->rp_ibuf;
- mutex_init(&rp->rp_mutex);
+ atomic_set(&rp->rp_locked, RP_UNLOCKED);
}
-static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
- struct nfs4_stateowner *so)
+static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
+ struct nfs4_stateowner *so)
{
if (!nfsd4_has_session(cstate)) {
- mutex_lock(&so->so_replay.rp_mutex);
+ wait_var_event(&so->so_replay.rp_locked,
+ atomic_cmpxchg(&so->so_replay.rp_locked,
+ RP_UNLOCKED, RP_LOCKED) != RP_LOCKED);
+ if (atomic_read(&so->so_replay.rp_locked) == RP_UNHASHED)
+ return -EAGAIN;
cstate->replay_owner = nfs4_get_stateowner(so);
}
+ return 0;
}
void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
@@ -4674,7 +4700,8 @@ void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
if (so != NULL) {
cstate->replay_owner = NULL;
- mutex_unlock(&so->so_replay.rp_mutex);
+ atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED);
+ wake_up_var(&so->so_replay.rp_locked);
nfs4_put_stateowner(so);
}
}
@@ -4855,34 +4882,46 @@ nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
}
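+/*
+ * Look up an open stateowner by owner string, or hash a newly allocated one
+ * if no match exists. The allocation is done without cl_lock held and the
+ * lookup then retried, so we may race with another thread inserting the
+ * same owner; an unconfirmed owner found by the lookup is released and
+ * replaced rather than reused.
+ */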
static struct nfs4_openowner *
-alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
- struct nfsd4_compound_state *cstate)
+find_or_alloc_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
+ struct nfsd4_compound_state *cstate)
{
struct nfs4_client *clp = cstate->clp;
- struct nfs4_openowner *oo, *ret;
+ struct nfs4_openowner *oo, *new = NULL;
- oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
- if (!oo)
- return NULL;
- oo->oo_owner.so_ops = &openowner_ops;
- oo->oo_owner.so_is_open_owner = 1;
- oo->oo_owner.so_seqid = open->op_seqid;
- oo->oo_flags = 0;
- if (nfsd4_has_session(cstate))
- oo->oo_flags |= NFS4_OO_CONFIRMED;
- oo->oo_time = 0;
- oo->oo_last_closed_stid = NULL;
- INIT_LIST_HEAD(&oo->oo_close_lru);
+retry:
spin_lock(&clp->cl_lock);
- ret = find_openstateowner_str_locked(strhashval, open, clp);
- if (ret == NULL) {
- hash_openowner(oo, clp, strhashval);
- ret = oo;
- } else
- nfs4_free_stateowner(&oo->oo_owner);
-
+ oo = find_openstateowner_str(strhashval, open, clp);
+ if (!oo && new) {
+ hash_openowner(new, clp, strhashval);
+ spin_unlock(&clp->cl_lock);
+ return new;
+ }
spin_unlock(&clp->cl_lock);
- return ret;
+
+ if (oo && !(oo->oo_flags & NFS4_OO_CONFIRMED)) {
+ /* Replace unconfirmed owners without checking for replay. */
+ release_openowner(oo);
+ oo = NULL;
+ }
+ if (oo) {
+ if (new)
+ nfs4_free_stateowner(&new->oo_owner);
+ return oo;
+ }
+
+ new = alloc_stateowner(openowner_slab, &open->op_owner, clp);
+ if (!new)
+ return NULL;
+ new->oo_owner.so_ops = &openowner_ops;
+ new->oo_owner.so_is_open_owner = 1;
+ new->oo_owner.so_seqid = open->op_seqid;
+ new->oo_flags = 0;
+ if (nfsd4_has_session(cstate))
+ new->oo_flags |= NFS4_OO_CONFIRMED;
+ new->oo_time = 0;
+ new->oo_last_closed_stid = NULL;
+ INIT_LIST_HEAD(&new->oo_close_lru);
+ goto retry;
}
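find_or_alloc_open_stateowner() above follows the common optimistic-allocation shape: search under cl_lock, and only when the owner is missing drop the lock, allocate, and retry, discarding the new object if another thread won the race. A generic, self-contained sketch of that shape (hypothetical list-based table, not nfsd code):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct node {
        struct node *next;
        char key[32];
    };

    static struct node *head;
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct node *lookup_locked(const char *key)
    {
        struct node *n;

        for (n = head; n; n = n->next)
            if (!strcmp(n->key, key))
                return n;
        return NULL;
    }

    static struct node *find_or_alloc(const char *key)
    {
        struct node *new = NULL, *found;

    retry:
        pthread_mutex_lock(&table_lock);
        found = lookup_locked(key);
        if (!found && new) {
            new->next = head;           /* insert the preallocated node */
            head = new;
            pthread_mutex_unlock(&table_lock);
            return new;
        }
        pthread_mutex_unlock(&table_lock);

        if (found) {
            free(new);                  /* lost the race; discard ours */
            return found;
        }

        new = calloc(1, sizeof(*new));  /* allocate outside the lock */
        if (!new)
            return NULL;
        strncpy(new->key, key, sizeof(new->key) - 1);
        goto retry;
    }

The nfsd version adds one wrinkle: an existing but unconfirmed openowner is released and treated as missing, since unconfirmed owners are replaced without a replay check.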
static struct nfs4_ol_stateid *
@@ -4958,7 +4997,11 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
* Wait for the refcount to drop to 2. Since it has been unhashed,
* there should be no danger of the refcount going back up again at
* this point.
+ * Some threads with a reference might be waiting for rp_locked,
+ * so tell them to stop waiting.
*/
+ atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED);
+ wake_up_var(&oo->oo_owner.so_replay.rp_locked);
wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
release_all_access(s);
@@ -5331,27 +5374,19 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
clp = cstate->clp;
strhashval = ownerstr_hashval(&open->op_owner);
- oo = find_openstateowner_str(strhashval, open, clp);
+retry:
+ oo = find_or_alloc_open_stateowner(strhashval, open, cstate);
open->op_openowner = oo;
- if (!oo) {
- goto new_owner;
- }
- if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
- /* Replace unconfirmed owners without checking for replay. */
- release_openowner(oo);
- open->op_openowner = NULL;
- goto new_owner;
+ if (!oo)
+ return nfserr_jukebox;
+ if (nfsd4_cstate_assign_replay(cstate, &oo->oo_owner) == -EAGAIN) {
+ nfs4_put_stateowner(&oo->oo_owner);
+ goto retry;
}
status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
if (status)
return status;
- goto alloc_stateid;
-new_owner:
- oo = alloc_init_open_stateowner(strhashval, open, cstate);
- if (oo == NULL)
- return nfserr_jukebox;
- open->op_openowner = oo;
-alloc_stateid:
+
open->op_stp = nfs4_alloc_open_stateid(clp);
if (!open->op_stp)
return nfserr_jukebox;
@@ -6122,12 +6157,8 @@ out:
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
struct nfsd4_open *open)
{
- if (open->op_openowner) {
- struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
-
- nfsd4_cstate_assign_replay(cstate, so);
- nfs4_put_stateowner(so);
- }
+ if (open->op_openowner)
+ nfs4_put_stateowner(&open->op_openowner->oo_owner);
if (open->op_file)
kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
@@ -6602,7 +6633,7 @@ deleg_reaper(struct nfsd_net *nn)
list_add(&clp->cl_ra_cblist, &cblist);
/* release in nfsd4_cb_recall_any_release */
- atomic_inc(&clp->cl_rpc_users);
+ kref_get(&clp->cl_nfsdfs.cl_ref);
set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
clp->cl_ra_time = ktime_get_boottime_seconds();
}
@@ -7191,12 +7222,16 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
trace_nfsd_preprocess(seqid, stateid);
*stpp = NULL;
+retry:
status = nfsd4_lookup_stateid(cstate, stateid,
typemask, statusmask, &s, nn);
if (status)
return status;
stp = openlockstateid(s);
- nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
+ if (nfsd4_cstate_assign_replay(cstate, stp->st_stateowner) == -EAGAIN) {
+ nfs4_put_stateowner(stp->st_stateowner);
+ goto retry;
+ }
status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
if (!status)
@@ -7338,7 +7373,7 @@ out:
return status;
}
-static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+static bool nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
struct nfs4_client *clp = s->st_stid.sc_client;
bool unhashed;
@@ -7355,11 +7390,11 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
list_for_each_entry(stp, &reaplist, st_locks)
nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
free_ol_stateid_reaplist(&reaplist);
+ return false;
} else {
spin_unlock(&clp->cl_lock);
free_ol_stateid_reaplist(&reaplist);
- if (unhashed)
- move_to_close_lru(s, clp->net);
+ return unhashed;
}
}
@@ -7375,6 +7410,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_ol_stateid *stp;
struct net *net = SVC_NET(rqstp);
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ bool need_move_to_close_list;
dprintk("NFSD: nfsd4_close on file %pd\n",
cstate->current_fh.fh_dentry);
@@ -7399,8 +7435,10 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
- nfsd4_close_open_stateid(stp);
+ need_move_to_close_list = nfsd4_close_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
+ if (need_move_to_close_list)
+ move_to_close_lru(stp, net);
/* v4.1+ suggests that we send a special stateid in here, since the
* clients should just ignore this anyway. Since this is not useful
@@ -8614,12 +8652,6 @@ nfs4_state_start(void)
if (ret)
return ret;
- ret = nfsd4_create_callback_queue();
- if (ret) {
- rhltable_destroy(&nfs4_file_rhltable);
- return ret;
- }
-
set_max_delegations();
return 0;
}
@@ -8660,7 +8692,6 @@ nfs4_state_shutdown_net(struct net *net)
void
nfs4_state_shutdown(void)
{
- nfsd4_destroy_callback_queue();
rhltable_destroy(&nfs4_file_rhltable);
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index fac938f563ad..c7bfd2180e3f 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1732,6 +1732,35 @@ nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
return nfsd4_decode_stateid4(argp, &free_stateid->fr_stateid);
}
+static __be32
+nfsd4_decode_get_dir_delegation(struct nfsd4_compoundargs *argp,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
+ __be32 status;
+
+ memset(gdd, 0, sizeof(*gdd));
+
+ if (xdr_stream_decode_bool(argp->xdr, &gdd->gdda_signal_deleg_avail) < 0)
+ return nfserr_bad_xdr;
+ status = nfsd4_decode_bitmap4(argp, gdd->gdda_notification_types,
+ ARRAY_SIZE(gdd->gdda_notification_types));
+ if (status)
+ return status;
+ status = nfsd4_decode_nfstime4(argp, &gdd->gdda_child_attr_delay);
+ if (status)
+ return status;
+ status = nfsd4_decode_nfstime4(argp, &gdd->gdda_dir_attr_delay);
+ if (status)
+ return status;
+ status = nfsd4_decode_bitmap4(argp, gdd->gdda_child_attributes,
+ ARRAY_SIZE(gdd->gdda_child_attributes));
+ if (status)
+ return status;
+ return nfsd4_decode_bitmap4(argp, gdd->gdda_dir_attributes,
+ ARRAY_SIZE(gdd->gdda_dir_attributes));
+}
+
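The decode order above mirrors the on-the-wire layout of GET_DIR_DELEGATION4args (RFC 8881, Section 18.39, where attr_notice4 is a typedef of nfstime4):

    typedef nfstime4 attr_notice4;

    struct GET_DIR_DELEGATION4args {
        /* CURRENT_FH: delegated directory */
        bool            gdda_signal_deleg_avail;
        bitmap4         gdda_notification_types;
        attr_notice4    gdda_child_attr_delay;
        attr_notice4    gdda_dir_attr_delay;
        bitmap4         gdda_child_attributes;
        bitmap4         gdda_dir_attributes;
    };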
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
@@ -2370,7 +2399,7 @@ static const nfsd4_dec nfsd4_dec_ops[] = {
[OP_CREATE_SESSION] = nfsd4_decode_create_session,
[OP_DESTROY_SESSION] = nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = nfsd4_decode_free_stateid,
- [OP_GET_DIR_DELEGATION] = nfsd4_decode_notsupp,
+ [OP_GET_DIR_DELEGATION] = nfsd4_decode_get_dir_delegation,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = nfsd4_decode_getdeviceinfo,
[OP_GETDEVICELIST] = nfsd4_decode_notsupp,
@@ -3490,11 +3519,13 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
struct dentry *dentry, const u32 *bmval,
int ignore_crossmnt)
{
+ DECLARE_BITMAP(attr_bitmap, ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
struct nfsd4_fattr_args args;
struct svc_fh *tempfh = NULL;
int starting_len = xdr->buf->len;
__be32 *attrlen_p, status;
int attrlen_offset;
+ u32 attrmask[3];
int err;
struct nfsd4_compoundres *resp = rqstp->rq_resp;
u32 minorversion = resp->cstate.minorversion;
@@ -3502,10 +3533,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
.mnt = exp->ex_path.mnt,
.dentry = dentry,
};
- union {
- u32 attrmask[3];
- unsigned long mask[2];
- } u;
unsigned long bit;
bool file_modified = false;
u64 size = 0;
@@ -3517,24 +3544,24 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
args.exp = exp;
args.dentry = dentry;
args.ignore_crossmnt = (ignore_crossmnt != 0);
+ args.acl = NULL;
/*
* Make a local copy of the attribute bitmap that can be modified.
*/
- memset(&u, 0, sizeof(u));
- u.attrmask[0] = bmval[0];
- u.attrmask[1] = bmval[1];
- u.attrmask[2] = bmval[2];
+ attrmask[0] = bmval[0];
+ attrmask[1] = bmval[1];
+ attrmask[2] = bmval[2];
args.rdattr_err = 0;
if (exp->ex_fslocs.migrated) {
- status = fattr_handle_absent_fs(&u.attrmask[0], &u.attrmask[1],
- &u.attrmask[2], &args.rdattr_err);
+ status = fattr_handle_absent_fs(&attrmask[0], &attrmask[1],
+ &attrmask[2], &args.rdattr_err);
if (status)
goto out;
}
args.size = 0;
- if (u.attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+ if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
&file_modified, &size);
if (status)
@@ -3553,16 +3580,16 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (!(args.stat.result_mask & STATX_BTIME))
/* underlying FS does not offer btime so we can't share it */
- u.attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
- if ((u.attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+ attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
+ if ((attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
- (u.attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+ (attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
FATTR4_WORD1_SPACE_TOTAL))) {
err = vfs_statfs(&path, &args.statfs);
if (err)
goto out_nfserr;
}
- if ((u.attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
+ if ((attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
!fhp) {
tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
status = nfserr_jukebox;
@@ -3576,11 +3603,10 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
} else
args.fhp = fhp;
- args.acl = NULL;
- if (u.attrmask[0] & FATTR4_WORD0_ACL) {
+ if (attrmask[0] & FATTR4_WORD0_ACL) {
err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
if (err == -EOPNOTSUPP)
- u.attrmask[0] &= ~FATTR4_WORD0_ACL;
+ attrmask[0] &= ~FATTR4_WORD0_ACL;
else if (err == -EINVAL) {
status = nfserr_attrnotsupp;
goto out;
@@ -3592,17 +3618,17 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
args.context = NULL;
- if ((u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
- u.attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+ attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
err = security_inode_getsecctx(d_inode(dentry),
&args.context, &args.contextlen);
else
err = -EOPNOTSUPP;
args.contextsupport = (err == 0);
- if (u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
+ if (attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
if (err == -EOPNOTSUPP)
- u.attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+ attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
else if (err)
goto out_nfserr;
}
@@ -3610,8 +3636,8 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
#endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
/* attrmask */
- status = nfsd4_encode_bitmap4(xdr, u.attrmask[0],
- u.attrmask[1], u.attrmask[2]);
+ status = nfsd4_encode_bitmap4(xdr, attrmask[0], attrmask[1],
+ attrmask[2]);
if (status)
goto out;
@@ -3620,7 +3646,9 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
if (!attrlen_p)
goto out_resource;
- for_each_set_bit(bit, (const unsigned long *)&u.mask,
+ bitmap_from_arr32(attr_bitmap, attrmask,
+ ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
+ for_each_set_bit(bit, attr_bitmap,
ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops)) {
status = nfsd4_enc_fattr4_encode_ops[bit](xdr, &args);
if (status != nfs_ok)
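The union removed in this hunk punned three u32 bitmask words onto an array of unsigned long, which yields correct bit numbering only on little-endian hosts; bitmap_from_arr32() performs the same conversion portably. Conceptually it behaves like this sketch (an illustrative model, not the kernel implementation):

    #include <limits.h>
    #include <stdint.h>

    #define LONG_BITS (sizeof(unsigned long) * CHAR_BIT)

    /* Copy @nbits bits from an array of 32-bit words into a bitmap of
     * unsigned longs, independent of endianness and word size.
     */
    static void bitmap_from_arr32_model(unsigned long *bitmap,
                                        const uint32_t *arr,
                                        unsigned int nbits)
    {
        unsigned int i;

        for (i = 0; i < nbits; i++) {
            if (arr[i / 32] & (UINT32_C(1) << (i % 32)))
                bitmap[i / LONG_BITS] |= 1UL << (i % LONG_BITS);
            else
                bitmap[i / LONG_BITS] &= ~(1UL << (i % LONG_BITS));
        }
    }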
@@ -4964,6 +4992,49 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfs_ok;
}
+static __be32
+nfsd4_encode_get_dir_delegation(struct nfsd4_compoundres *resp, __be32 nfserr,
+ union nfsd4_op_u *u)
+{
+ struct nfsd4_get_dir_delegation *gdd = &u->get_dir_delegation;
+ struct xdr_stream *xdr = resp->xdr;
+ __be32 status = nfserr_resource;
+
+ switch (gdd->gddrnf_status) {
+ case GDD4_OK:
+ if (xdr_stream_encode_u32(xdr, GDD4_OK) != XDR_UNIT)
+ break;
+ status = nfsd4_encode_verifier4(xdr, &gdd->gddr_cookieverf);
+ if (status)
+ break;
+ status = nfsd4_encode_stateid4(xdr, &gdd->gddr_stateid);
+ if (status)
+ break;
+ status = nfsd4_encode_bitmap4(xdr, gdd->gddr_notification[0], 0, 0);
+ if (status)
+ break;
+ status = nfsd4_encode_bitmap4(xdr, gdd->gddr_child_attributes[0],
+ gdd->gddr_child_attributes[1],
+ gdd->gddr_child_attributes[2]);
+ if (status)
+ break;
+ status = nfsd4_encode_bitmap4(xdr, gdd->gddr_dir_attributes[0],
+ gdd->gddr_dir_attributes[1],
+ gdd->gddr_dir_attributes[2]);
+ break;
+ default:
+ pr_warn("nfsd: bad gddrnf_status (%u)\n", gdd->gddrnf_status);
+ gdd->gddrnf_will_signal_deleg_avail = 0;
+ fallthrough;
+ case GDD4_UNAVAIL:
+ if (xdr_stream_encode_u32(xdr, GDD4_UNAVAIL) != XDR_UNIT)
+ break;
+ status = nfsd4_encode_bool(xdr, gdd->gddrnf_will_signal_deleg_avail);
+ break;
+ }
+ return status;
+}
+
#ifdef CONFIG_NFSD_PNFS
static __be32
nfsd4_encode_device_addr4(struct xdr_stream *xdr,
@@ -5200,7 +5271,12 @@ nfsd4_encode_offload_status(struct nfsd4_compoundres *resp, __be32 nfserr,
if (nfserr != nfs_ok)
return nfserr;
/* osr_complete<1> */
- if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ if (os->completed) {
+ if (xdr_stream_encode_u32(xdr, 1) != XDR_UNIT)
+ return nfserr_resource;
+ if (xdr_stream_encode_be32(xdr, os->status) != XDR_UNIT)
+ return nfserr_resource;
+ } else if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
return nfserr_resource;
return nfs_ok;
}
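For context: osr_complete in OFFLOAD_STATUS4resok is an XDR array of at most one element (RFC 7862), so the encoder emits either length 0 (copy still in flight) or length 1 followed by the final nfsstat4, which is what the new os->completed branch produces:

    struct OFFLOAD_STATUS4resok {
        length4         osr_count;
        nfsstat4        osr_complete<1>;
    };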
@@ -5580,7 +5656,7 @@ static const nfsd4_enc nfsd4_enc_ops[] = {
[OP_CREATE_SESSION] = nfsd4_encode_create_session,
[OP_DESTROY_SESSION] = nfsd4_encode_noop,
[OP_FREE_STATEID] = nfsd4_encode_noop,
- [OP_GET_DIR_DELEGATION] = nfsd4_encode_noop,
+ [OP_GET_DIR_DELEGATION] = nfsd4_encode_get_dir_delegation,
#ifdef CONFIG_NFSD_PNFS
[OP_GETDEVICEINFO] = nfsd4_encode_getdeviceinfo,
[OP_GETDEVICELIST] = nfsd4_encode_noop,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index ecd18bffeebc..202140df8f82 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -15,6 +15,7 @@
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/sunrpc/svc.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
@@ -48,12 +49,10 @@ enum {
NFSD_MaxBlkSize,
NFSD_MaxConnections,
NFSD_Filecache,
-#ifdef CONFIG_NFSD_V4
NFSD_Leasetime,
NFSD_Gracetime,
NFSD_RecoveryDir,
NFSD_V4EndGrace,
-#endif
NFSD_MaxReserved
};
@@ -406,7 +405,9 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
if (newthreads < 0)
return -EINVAL;
trace_nfsd_ctl_threads(net, newthreads);
- rv = nfsd_svc(newthreads, net, file->f_cred);
+ mutex_lock(&nfsd_mutex);
+ rv = nfsd_svc(newthreads, net, file->f_cred, NULL);
+ mutex_unlock(&nfsd_mutex);
if (rv < 0)
return rv;
} else
@@ -1360,7 +1361,9 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
+#endif
[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
#endif
/* last one */ {""}
@@ -1652,6 +1655,518 @@ int nfsd_nl_rpc_status_get_done(struct netlink_callback *cb)
}
/**
+ * nfsd_nl_threads_set_doit - set the number of running threads
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_threads_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ int nthreads = 0, count = 0, nrpools, ret = -EOPNOTSUPP, rem;
+ struct net *net = genl_info_net(info);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ const struct nlattr *attr;
+ const char *scope = NULL;
+
+ if (GENL_REQ_ATTR_CHECK(info, NFSD_A_SERVER_THREADS))
+ return -EINVAL;
+
+ /* count number of SERVER_THREADS values */
+ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ if (nla_type(attr) == NFSD_A_SERVER_THREADS)
+ count++;
+ }
+
+ mutex_lock(&nfsd_mutex);
+
+ nrpools = nfsd_nrpools(net);
+ if (nrpools && count > nrpools)
+ count = nrpools;
+
+ /* XXX: make this handle non-global pool-modes */
+ if (count > 1)
+ goto out_unlock;
+
+ nthreads = nla_get_u32(info->attrs[NFSD_A_SERVER_THREADS]);
+ if (info->attrs[NFSD_A_SERVER_GRACETIME] ||
+ info->attrs[NFSD_A_SERVER_LEASETIME] ||
+ info->attrs[NFSD_A_SERVER_SCOPE]) {
+ ret = -EBUSY;
+ if (nn->nfsd_serv && nn->nfsd_serv->sv_nrthreads)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ attr = info->attrs[NFSD_A_SERVER_GRACETIME];
+ if (attr) {
+ u32 gracetime = nla_get_u32(attr);
+
+ if (gracetime < 10 || gracetime > 3600)
+ goto out_unlock;
+
+ nn->nfsd4_grace = gracetime;
+ }
+
+ attr = info->attrs[NFSD_A_SERVER_LEASETIME];
+ if (attr) {
+ u32 leasetime = nla_get_u32(attr);
+
+ if (leasetime < 10 || leasetime > 3600)
+ goto out_unlock;
+
+ nn->nfsd4_lease = leasetime;
+ }
+
+ attr = info->attrs[NFSD_A_SERVER_SCOPE];
+ if (attr)
+ scope = nla_data(attr);
+ }
+
+ ret = nfsd_svc(nthreads, net, get_current_cred(), scope);
+
+out_unlock:
+ mutex_unlock(&nfsd_mutex);
+
+ return ret == nthreads ? 0 : ret;
+}
+
+/**
+ * nfsd_nl_threads_get_doit - get the number of running threads
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_threads_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ void *hdr;
+ int err;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ mutex_lock(&nfsd_mutex);
+
+ err = nla_put_u32(skb, NFSD_A_SERVER_GRACETIME,
+ nn->nfsd4_grace) ||
+ nla_put_u32(skb, NFSD_A_SERVER_LEASETIME,
+ nn->nfsd4_lease) ||
+ nla_put_string(skb, NFSD_A_SERVER_SCOPE,
+ nn->nfsd_name);
+ if (err)
+ goto err_unlock;
+
+ if (nn->nfsd_serv) {
+ int i;
+
+ for (i = 0; i < nfsd_nrpools(net); ++i) {
+ struct svc_pool *sp = &nn->nfsd_serv->sv_pools[i];
+
+ err = nla_put_u32(skb, NFSD_A_SERVER_THREADS,
+ atomic_read(&sp->sp_nrthreads));
+ if (err)
+ goto err_unlock;
+ }
+ } else {
+ err = nla_put_u32(skb, NFSD_A_SERVER_THREADS, 0);
+ if (err)
+ goto err_unlock;
+ }
+
+ mutex_unlock(&nfsd_mutex);
+
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+
+err_unlock:
+ mutex_unlock(&nfsd_mutex);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
+
+/**
+ * nfsd_nl_version_set_doit - set the nfs enabled versions
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_version_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ const struct nlattr *attr;
+ struct nfsd_net *nn;
+ int i, rem;
+
+ if (GENL_REQ_ATTR_CHECK(info, NFSD_A_SERVER_PROTO_VERSION))
+ return -EINVAL;
+
+ mutex_lock(&nfsd_mutex);
+
+ nn = net_generic(genl_info_net(info), nfsd_net_id);
+ if (nn->nfsd_serv) {
+ mutex_unlock(&nfsd_mutex);
+ return -EBUSY;
+ }
+
+ /* clear current supported versions. */
+ nfsd_vers(nn, 2, NFSD_CLEAR);
+ nfsd_vers(nn, 3, NFSD_CLEAR);
+ for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++)
+ nfsd_minorversion(nn, i, NFSD_CLEAR);
+
+ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ struct nlattr *tb[NFSD_A_VERSION_MAX + 1];
+ u32 major, minor = 0;
+ bool enabled;
+
+ if (nla_type(attr) != NFSD_A_SERVER_PROTO_VERSION)
+ continue;
+
+ if (nla_parse_nested(tb, NFSD_A_VERSION_MAX, attr,
+ nfsd_version_nl_policy, info->extack) < 0)
+ continue;
+
+ if (!tb[NFSD_A_VERSION_MAJOR])
+ continue;
+
+ major = nla_get_u32(tb[NFSD_A_VERSION_MAJOR]);
+ if (tb[NFSD_A_VERSION_MINOR])
+ minor = nla_get_u32(tb[NFSD_A_VERSION_MINOR]);
+
+ enabled = nla_get_flag(tb[NFSD_A_VERSION_ENABLED]);
+
+ switch (major) {
+ case 4:
+ nfsd_minorversion(nn, minor, enabled ? NFSD_SET : NFSD_CLEAR);
+ break;
+ case 3:
+ case 2:
+ if (!minor)
+ nfsd_vers(nn, major, enabled ? NFSD_SET : NFSD_CLEAR);
+ break;
+ default:
+ break;
+ }
+ }
+
+ mutex_unlock(&nfsd_mutex);
+
+ return 0;
+}
+
+/**
+ * nfsd_nl_version_get_doit - get the enabled status for all supported nfs versions
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_version_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfsd_net *nn;
+ int i, err;
+ void *hdr;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ mutex_lock(&nfsd_mutex);
+ nn = net_generic(genl_info_net(info), nfsd_net_id);
+
+ for (i = 2; i <= 4; i++) {
+ int j;
+
+ for (j = 0; j <= NFSD_SUPPORTED_MINOR_VERSION; j++) {
+ struct nlattr *attr;
+
+ /* Don't record any versions the kernel doesn't have
+ * compiled in
+ */
+ if (!nfsd_support_version(i))
+ continue;
+
+ /* NFSv{2,3} do not support minor versions */
+ if (i < 4 && j)
+ continue;
+
+ attr = nla_nest_start(skb,
+ NFSD_A_SERVER_PROTO_VERSION);
+ if (!attr) {
+ err = -EINVAL;
+ goto err_nfsd_unlock;
+ }
+
+ if (nla_put_u32(skb, NFSD_A_VERSION_MAJOR, i) ||
+ nla_put_u32(skb, NFSD_A_VERSION_MINOR, j)) {
+ err = -EINVAL;
+ goto err_nfsd_unlock;
+ }
+
+ /* Set the enabled flag if the version is enabled */
+ if (nfsd_vers(nn, i, NFSD_TEST) &&
+ (i < 4 || nfsd_minorversion(nn, j, NFSD_TEST)) &&
+ nla_put_flag(skb, NFSD_A_VERSION_ENABLED)) {
+ err = -EINVAL;
+ goto err_nfsd_unlock;
+ }
+
+ nla_nest_end(skb, attr);
+ }
+ }
+
+ mutex_unlock(&nfsd_mutex);
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+
+err_nfsd_unlock:
+ mutex_unlock(&nfsd_mutex);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
+
+/**
+ * nfsd_nl_listener_set_doit - set the nfs running sockets
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct svc_xprt *xprt, *tmp;
+ const struct nlattr *attr;
+ struct svc_serv *serv;
+ LIST_HEAD(permsocks);
+ struct nfsd_net *nn;
+ int err, rem;
+
+ mutex_lock(&nfsd_mutex);
+
+ err = nfsd_create_serv(net);
+ if (err) {
+ mutex_unlock(&nfsd_mutex);
+ return err;
+ }
+
+ nn = net_generic(net, nfsd_net_id);
+ serv = nn->nfsd_serv;
+
+ spin_lock_bh(&serv->sv_lock);
+
+ /* Move all of the old listener sockets to a temp list */
+ list_splice_init(&serv->sv_permsocks, &permsocks);
+
+ /*
+ * Walk the list of server_socks from userland and move any that match
+ * back to sv_permsocks
+ */
+ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ struct nlattr *tb[NFSD_A_SOCK_MAX + 1];
+ const char *xcl_name;
+ struct sockaddr *sa;
+
+ if (nla_type(attr) != NFSD_A_SERVER_SOCK_ADDR)
+ continue;
+
+ if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr,
+ nfsd_sock_nl_policy, info->extack) < 0)
+ continue;
+
+ if (!tb[NFSD_A_SOCK_ADDR] || !tb[NFSD_A_SOCK_TRANSPORT_NAME])
+ continue;
+
+ if (nla_len(tb[NFSD_A_SOCK_ADDR]) < sizeof(*sa))
+ continue;
+
+ xcl_name = nla_data(tb[NFSD_A_SOCK_TRANSPORT_NAME]);
+ sa = nla_data(tb[NFSD_A_SOCK_ADDR]);
+
+ /* Put back any matching sockets */
+ list_for_each_entry_safe(xprt, tmp, &permsocks, xpt_list) {
+ /* This shouldn't be possible */
+ if (WARN_ON_ONCE(xprt->xpt_net != net)) {
+ list_move(&xprt->xpt_list, &serv->sv_permsocks);
+ continue;
+ }
+
+ /* If everything matches, put it back */
+ if (!strcmp(xprt->xpt_class->xcl_name, xcl_name) &&
+ rpc_cmp_addr_port(sa, (struct sockaddr *)&xprt->xpt_local)) {
+ list_move(&xprt->xpt_list, &serv->sv_permsocks);
+ break;
+ }
+ }
+ }
+
+ /* For now, no removing old sockets while server is running */
+ if (serv->sv_nrthreads && !list_empty(&permsocks)) {
+ list_splice_init(&permsocks, &serv->sv_permsocks);
+ spin_unlock_bh(&serv->sv_lock);
+ err = -EBUSY;
+ goto out_unlock_mtx;
+ }
+
+ /* Close the remaining sockets on the permsocks list */
+ while (!list_empty(&permsocks)) {
+ xprt = list_first_entry(&permsocks, struct svc_xprt, xpt_list);
+ list_move(&xprt->xpt_list, &serv->sv_permsocks);
+
+ /*
+ * Newly-created sockets are born with the BUSY bit set. Clear
+ * it if there are no threads, since nothing can pick it up
+ * in that case.
+ */
+ if (!serv->sv_nrthreads)
+ clear_bit(XPT_BUSY, &xprt->xpt_flags);
+
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ spin_unlock_bh(&serv->sv_lock);
+ svc_xprt_close(xprt);
+ spin_lock_bh(&serv->sv_lock);
+ }
+
+ spin_unlock_bh(&serv->sv_lock);
+
+ /* walk list of addrs again, open any that still don't exist */
+ nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
+ struct nlattr *tb[NFSD_A_SOCK_MAX + 1];
+ const char *xcl_name;
+ struct sockaddr *sa;
+ int ret;
+
+ if (nla_type(attr) != NFSD_A_SERVER_SOCK_ADDR)
+ continue;
+
+ if (nla_parse_nested(tb, NFSD_A_SOCK_MAX, attr,
+ nfsd_sock_nl_policy, info->extack) < 0)
+ continue;
+
+ if (!tb[NFSD_A_SOCK_ADDR] || !tb[NFSD_A_SOCK_TRANSPORT_NAME])
+ continue;
+
+ if (nla_len(tb[NFSD_A_SOCK_ADDR]) < sizeof(*sa))
+ continue;
+
+ xcl_name = nla_data(tb[NFSD_A_SOCK_TRANSPORT_NAME]);
+ sa = nla_data(tb[NFSD_A_SOCK_ADDR]);
+
+ xprt = svc_find_listener(serv, xcl_name, net, sa);
+ if (xprt) {
+ svc_xprt_put(xprt);
+ continue;
+ }
+
+ ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa,
+ SVC_SOCK_ANONYMOUS,
+ get_current_cred());
+ /* always save the latest error */
+ if (ret < 0)
+ err = ret;
+ }
+
+ if (!serv->sv_nrthreads && list_empty(&nn->nfsd_serv->sv_permsocks))
+ nfsd_destroy_serv(net);
+
+out_unlock_mtx:
+ mutex_unlock(&nfsd_mutex);
+
+ return err;
+}
+
+/**
+ * nfsd_nl_listener_get_doit - get the nfs running listeners
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int nfsd_nl_listener_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct svc_xprt *xprt;
+ struct svc_serv *serv;
+ struct nfsd_net *nn;
+ void *hdr;
+ int err;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ mutex_lock(&nfsd_mutex);
+ nn = net_generic(genl_info_net(info), nfsd_net_id);
+
+ /* no nfs server? Just send empty socket list */
+ if (!nn->nfsd_serv)
+ goto out_unlock_mtx;
+
+ serv = nn->nfsd_serv;
+ spin_lock_bh(&serv->sv_lock);
+ list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
+ struct nlattr *attr;
+
+ attr = nla_nest_start(skb, NFSD_A_SERVER_SOCK_ADDR);
+ if (!attr) {
+ err = -EINVAL;
+ goto err_serv_unlock;
+ }
+
+ if (nla_put_string(skb, NFSD_A_SOCK_TRANSPORT_NAME,
+ xprt->xpt_class->xcl_name) ||
+ nla_put(skb, NFSD_A_SOCK_ADDR,
+ sizeof(struct sockaddr_storage),
+ &xprt->xpt_local)) {
+ err = -EINVAL;
+ goto err_serv_unlock;
+ }
+
+ nla_nest_end(skb, attr);
+ }
+ spin_unlock_bh(&serv->sv_lock);
+out_unlock_mtx:
+ mutex_unlock(&nfsd_mutex);
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+
+err_serv_unlock:
+ spin_unlock_bh(&serv->sv_lock);
+ mutex_unlock(&nfsd_mutex);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
+
+/**
* nfsd_net_init - Prepare the nfsd_net portion of a new net namespace
* @net: a freshly-created network namespace
*
@@ -1672,7 +2187,8 @@ static __net_init int nfsd_net_init(struct net *net)
retval = nfsd_idmap_init(net);
if (retval)
goto out_idmap_error;
- retval = nfsd_stat_counters_init(nn);
+ retval = percpu_counter_init_many(nn->counter, 0, GFP_KERNEL,
+ NFSD_STATS_COUNTERS_NUM);
if (retval)
goto out_repcache_error;
memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
@@ -1704,7 +2220,7 @@ static __net_exit void nfsd_net_exit(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfsd_proc_stat_shutdown(net);
- nfsd_stat_counters_destroy(nn);
+ percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
nfsd_idmap_shutdown(net);
nfsd_export_shutdown(net);
nfsd_netns_free_versions(nn);
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 16c5a05f340e..8f4f239d9f8a 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -103,7 +103,7 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp,
/*
* Function prototypes.
*/
-int nfsd_svc(int nrservs, struct net *net, const struct cred *cred);
+int nfsd_svc(int nrservs, struct net *net, const struct cred *cred, const char *scope);
int nfsd_dispatch(struct svc_rqst *rqstp);
int nfsd_nrthreads(struct net *);
@@ -230,7 +230,6 @@ void nfsd_lockd_shutdown(void);
#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
-#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 40fecf7b224f..0b75305fb5f5 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -573,7 +573,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
_fh_update(fhp, exp, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
fh_put(fhp);
- return nfserr_opnotsupp;
+ return nfserr_stale;
}
return 0;
@@ -599,7 +599,7 @@ fh_update(struct svc_fh *fhp)
_fh_update(fhp, fhp->fh_export, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
- return nfserr_opnotsupp;
+ return nfserr_stale;
return 0;
out_bad:
printk(KERN_ERR "fh_update: fh not verified!\n");
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index c0d17b92b249..cd9a6a1a9fc8 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -133,8 +133,7 @@ struct svc_program nfsd_program = {
.pg_rpcbind_set = nfsd_rpcbind_set,
};
-static bool
-nfsd_support_version(int vers)
+bool nfsd_support_version(int vers)
{
if (vers >= NFSD_MINVERS && vers < NFSD_NRVERS)
return nfsd_version[vers] != NULL;
@@ -769,13 +768,14 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
* this is the first time nrservs is nonzero.
*/
int
-nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
+nfsd_svc(int nrservs, struct net *net, const struct cred *cred, const char *scope)
{
int error;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct svc_serv *serv;
- mutex_lock(&nfsd_mutex);
+ lockdep_assert_held(&nfsd_mutex);
+
dprintk("nfsd: creating service\n");
nrservs = max(nrservs, 0);
@@ -785,7 +785,7 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
if (nrservs == 0 && nn->nfsd_serv == NULL)
goto out;
- strscpy(nn->nfsd_name, utsname()->nodename,
+ strscpy(nn->nfsd_name, scope ? scope : utsname()->nodename,
sizeof(nn->nfsd_name));
error = nfsd_create_serv(net);
@@ -804,7 +804,6 @@ out_put:
if (serv->sv_nrthreads == 0)
nfsd_destroy_serv(net);
out:
- mutex_unlock(&nfsd_mutex);
return error;
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 01c6f3445646..ffc217099d19 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -68,7 +68,7 @@ struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct rpc_message cb_msg;
const struct nfsd4_callback_ops *cb_ops;
- struct delayed_work cb_work;
+ struct work_struct cb_work;
int cb_seq_status;
int cb_status;
bool cb_need_restart;
@@ -408,6 +408,8 @@ struct nfs4_client {
1 << NFSD4_CLIENT_CB_KILL)
#define NFSD4_CLIENT_CB_RECALL_ANY (6)
unsigned long cl_flags;
+
+ struct workqueue_struct *cl_callback_wq;
const struct cred *cl_cb_cred;
struct rpc_clnt *cl_cb_client;
u32 cl_cb_ident;
@@ -486,7 +488,7 @@ struct nfs4_replay {
unsigned int rp_buflen;
char *rp_buf;
struct knfsd_fh rp_openfh;
- struct mutex rp_mutex;
+ atomic_t rp_locked;
char rp_ibuf[NFSD4_REPLAY_ISIZE];
};
@@ -735,8 +737,6 @@ extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *
extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
-extern int nfsd4_create_callback_queue(void);
-extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfsd4_shutdown_copy(struct nfs4_client *clp);
extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(struct xdr_netobj name,
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index be52fb1e928e..bb22893f1157 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -73,48 +73,6 @@ static int nfsd_show(struct seq_file *seq, void *v)
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
-int nfsd_percpu_counters_init(struct percpu_counter *counters, int num)
-{
- int i, err = 0;
-
- for (i = 0; !err && i < num; i++)
- err = percpu_counter_init(&counters[i], 0, GFP_KERNEL);
-
- if (!err)
- return 0;
-
- for (; i > 0; i--)
- percpu_counter_destroy(&counters[i-1]);
-
- return err;
-}
-
-void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num)
-{
- int i;
-
- for (i = 0; i < num; i++)
- percpu_counter_set(&counters[i], 0);
-}
-
-void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
-{
- int i;
-
- for (i = 0; i < num; i++)
- percpu_counter_destroy(&counters[i]);
-}
-
-int nfsd_stat_counters_init(struct nfsd_net *nn)
-{
- return nfsd_percpu_counters_init(nn->counter, NFSD_STATS_COUNTERS_NUM);
-}
-
-void nfsd_stat_counters_destroy(struct nfsd_net *nn)
-{
- nfsd_percpu_counters_destroy(nn->counter, NFSD_STATS_COUNTERS_NUM);
-}
-
void nfsd_proc_stat_init(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index d2753e975dfd..04aacb6c36e2 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -10,11 +10,6 @@
#include <uapi/linux/nfsd/stats.h>
#include <linux/percpu_counter.h>
-int nfsd_percpu_counters_init(struct percpu_counter *counters, int num);
-void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num);
-void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num);
-int nfsd_stat_counters_init(struct nfsd_net *nn);
-void nfsd_stat_counters_destroy(struct nfsd_net *nn);
void nfsd_proc_stat_init(struct net *net);
void nfsd_proc_stat_shutdown(struct net *net);
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 1cd2076210b1..b5e48d504062 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -749,6 +749,76 @@ TRACE_EVENT_CONDITION(nfsd_seq4_status,
)
);
+DECLARE_EVENT_CLASS(nfsd_cs_slot_class,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct nfsd4_create_session *cs
+ ),
+ TP_ARGS(clp, cs),
+ TP_STRUCT__entry(
+ __field(u32, seqid)
+ __field(u32, slot_seqid)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_fast_assign(
+ const struct nfsd4_clid_slot *slot = &clp->cl_cs_slot;
+
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen);
+ __entry->seqid = cs->seqid;
+ __entry->slot_seqid = slot->sl_seqid;
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x seqid=%u slot_seqid=%u",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __entry->seqid, __entry->slot_seqid
+ )
+);
+
+#define DEFINE_CS_SLOT_EVENT(name) \
+DEFINE_EVENT(nfsd_cs_slot_class, nfsd_##name, \
+ TP_PROTO( \
+ const struct nfs4_client *clp, \
+ const struct nfsd4_create_session *cs \
+ ), \
+ TP_ARGS(clp, cs))
+
+DEFINE_CS_SLOT_EVENT(slot_seqid_conf);
+DEFINE_CS_SLOT_EVENT(slot_seqid_unconf);
+
+TRACE_EVENT(nfsd_slot_seqid_sequence,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct nfsd4_sequence *seq,
+ const struct nfsd4_slot *slot
+ ),
+ TP_ARGS(clp, seq, slot),
+ TP_STRUCT__entry(
+ __field(u32, seqid)
+ __field(u32, slot_seqid)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
+ __field(bool, in_use)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen);
+ __entry->seqid = seq->seqid;
+ __entry->slot_seqid = slot->sl_seqid;
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x seqid=%u slot_seqid=%u (%sin use)",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __entry->seqid, __entry->slot_seqid,
+ __entry->in_use ? "" : "not "
+ )
+);
+
DECLARE_EVENT_CLASS(nfsd_clientid_class,
TP_PROTO(const clientid_t *clid),
TP_ARGS(clid),
@@ -778,6 +848,30 @@ DEFINE_CLIENTID_EVENT(purged);
DEFINE_CLIENTID_EVENT(renew);
DEFINE_CLIENTID_EVENT(stale);
+TRACE_EVENT(nfsd_mark_client_expired,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ int cl_rpc_users
+ ),
+ TP_ARGS(clp, cl_rpc_users),
+ TP_STRUCT__entry(
+ __field(int, cl_rpc_users)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
+ ),
+ TP_fast_assign(
+ __entry->cl_rpc_users = cl_rpc_users;
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
+ clp->cl_cb_conn.cb_addrlen);
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x cl_rpc_users=%d",
+ __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
+ __entry->cl_rpc_users)
+);
+
DECLARE_EVENT_CLASS(nfsd_net_class,
TP_PROTO(const struct nfsd_net *nn),
TP_ARGS(nn),
@@ -1534,7 +1628,7 @@ TRACE_EVENT(nfsd_cb_seq_status,
__entry->seq_status = cb->cb_seq_status;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
- " sessionid=%08x:%08x:%08x:%08x tk_status=%d seq_status=%d\n",
+ " sessionid=%08x:%08x:%08x:%08x tk_status=%d seq_status=%d",
__entry->task_id, __entry->client_id,
__entry->cl_boot, __entry->cl_id,
__entry->seqno, __entry->reserved,
@@ -1573,7 +1667,7 @@ TRACE_EVENT(nfsd_cb_free_slot,
__entry->slot_seqno = session->se_cb_seq_nr;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
- " sessionid=%08x:%08x:%08x:%08x new slot seqno=%u\n",
+ " sessionid=%08x:%08x:%08x:%08x new slot seqno=%u",
__entry->task_id, __entry->client_id,
__entry->cl_boot, __entry->cl_id,
__entry->seqno, __entry->reserved,
@@ -1978,7 +2072,7 @@ TRACE_EVENT(nfsd_ctl_time,
__entry->time = time;
__assign_str(name, name);
),
- TP_printk("file=%s time=%d\n",
+ TP_printk("file=%s time=%d",
__get_str(name), __entry->time
)
);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6a9464262fae..29b1f3613800 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1422,7 +1422,7 @@ nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
* Callers expect new file metadata to be committed even
* if the attributes have not changed.
*/
- if (iap->ia_valid)
+ if (nfsd_attrs_valid(attrs))
status = nfsd_setattr(rqstp, resfhp, attrs, NULL);
else
status = nfserrno(commit_metadata(resfhp));
@@ -1852,7 +1852,7 @@ retry:
trap = lock_rename(tdentry, fdentry);
if (IS_ERR(trap)) {
err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
- goto out;
+ goto out_want_write;
}
err = fh_fill_pre_attrs(ffhp);
if (err != nfs_ok)
@@ -1922,6 +1922,7 @@ retry:
}
out_unlock:
unlock_rename(tdentry, fdentry);
+out_want_write:
fh_drop_write(ffhp);
/*
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index c60fdb6200fd..57cd70062048 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -60,6 +60,14 @@ static inline void nfsd_attrs_free(struct nfsd_attrs *attrs)
posix_acl_release(attrs->na_dpacl);
}
+static inline bool nfsd_attrs_valid(struct nfsd_attrs *attrs)
+{
+ struct iattr *iap = attrs->na_iattr;
+
+ return (iap->ia_valid || (attrs->na_seclabel &&
+ attrs->na_seclabel->len));
+}
+
__be32 nfserrno (int errno);
int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
struct svc_export **expp);
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 415516c1b27e..fbdd42cde1fa 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -518,6 +518,24 @@ struct nfsd4_free_stateid {
stateid_t fr_stateid; /* request */
};
+struct nfsd4_get_dir_delegation {
+ /* request */
+ u32 gdda_signal_deleg_avail;
+ u32 gdda_notification_types[1];
+ struct timespec64 gdda_child_attr_delay;
+ struct timespec64 gdda_dir_attr_delay;
+ u32 gdda_child_attributes[3];
+ u32 gdda_dir_attributes[3];
+ /* response */
+ u32 gddrnf_status;
+ nfs4_verifier gddr_cookieverf;
+ stateid_t gddr_stateid;
+ u32 gddr_notification[1];
+ u32 gddr_child_attributes[3];
+ u32 gddr_dir_attributes[3];
+ bool gddrnf_will_signal_deleg_avail;
+};
+
/* also used for NVERIFY */
struct nfsd4_verify {
u32 ve_bmval[3]; /* request */
@@ -674,8 +692,10 @@ struct nfsd4_copy {
#define NFSD4_COPY_F_INTRA (1)
#define NFSD4_COPY_F_SYNCHRONOUS (2)
#define NFSD4_COPY_F_COMMITTED (3)
+#define NFSD4_COPY_F_COMPLETED (4)
/* response */
+ __be32 nfserr;
struct nfsd42_write_res cp_res;
struct knfsd_fh fh;
@@ -735,7 +755,8 @@ struct nfsd4_offload_status {
/* response */
u64 count;
- u32 status;
+ __be32 status;
+ bool completed;
};
struct nfsd4_copy_notify {
@@ -797,6 +818,7 @@ struct nfsd4_op {
struct nfsd4_reclaim_complete reclaim_complete;
struct nfsd4_test_stateid test_stateid;
struct nfsd4_free_stateid free_stateid;
+ struct nfsd4_get_dir_delegation get_dir_delegation;
struct nfsd4_getdeviceinfo getdeviceinfo;
struct nfsd4_layoutget layoutget;
struct nfsd4_layoutcommit layoutcommit;
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 65659fa0372e..a139970e4804 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1857,13 +1857,22 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
}
/**
- * nilfs_btree_convert_and_insert -
- * @bmap:
- * @key:
- * @ptr:
- * @keys:
- * @ptrs:
- * @n:
+ * nilfs_btree_convert_and_insert - Convert and insert entries into a B-tree
+ * @btree: NILFS B-tree structure
+ * @key: Key of the new entry to be inserted
+ * @ptr: Pointer (block number) associated with the key to be inserted
+ * @keys: Array of keys to be inserted in addition to @key
+ * @ptrs: Array of pointers associated with @keys
+ * @n: Number of keys and pointers in @keys and @ptrs
+ *
+ * This function is used to insert a new entry specified by @key and @ptr,
+ * along with additional entries specified by @keys and @ptrs arrays, into a
+ * NILFS B-tree.
+ * It prepares the necessary changes by allocating the required blocks and any
+ * intermediate nodes, and converts the block mapping from its previous
+ * form (currently only direct mapping exists) to a B-tree.
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
__u64 key, __u64 ptr,
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index bc846b904b68..a002a44ff161 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -174,7 +174,6 @@ Eend:
dir->i_ino, (folio->index << PAGE_SHIFT) + offs,
(unsigned long)le64_to_cpu(p->inode));
fail:
- folio_set_error(folio);
return false;
}
@@ -240,7 +239,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
#define S_SHIFT 12
static unsigned char
-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
[S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
[S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index bf9a11d58817..1c9ae36a03ab 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -175,6 +175,7 @@ int nilfs_init_gcinode(struct inode *inode)
/**
* nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes
+ * @nilfs: NILFS filesystem instance
*/
void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
{
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index f1a01c191cf5..8be471ce4f19 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -60,7 +60,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
if (argv->v_nmembs == 0)
return 0;
- if (argv->v_size > PAGE_SIZE)
+ if ((size_t)argv->v_size > PAGE_SIZE)
return -EINVAL;
/*
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index 2e29b98ba8ba..728e90be3570 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -335,8 +335,8 @@ void __nilfs_error(struct super_block *sb, const char *function,
extern struct nilfs_super_block *
nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
-extern int nilfs_store_magic_and_option(struct super_block *,
- struct nilfs_super_block *, char *);
+extern int nilfs_store_magic(struct super_block *sb,
+ struct nilfs_super_block *sbp);
extern int nilfs_check_feature_compatibility(struct super_block *,
struct nilfs_super_block *);
extern void nilfs_set_log_cursor(struct nilfs_super_block *,
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 49a70c68bf3c..020f304c600e 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -563,6 +563,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
* checkpoint
* @nilfs: nilfs object
* @sb: super block instance
+ * @root: NILFS root instance
* @ri: pointer to a nilfs_recovery_info
*/
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
@@ -698,7 +699,9 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
return;
bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
- BUG_ON(!bh);
+ if (WARN_ON(!bh))
+ return; /* should never happen */
+
memset(bh->b_data, 0, bh->b_size);
set_buffer_dirty(bh);
err = sync_dirty_buffer(bh);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index aa5290cb7467..8654ab8ad534 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1725,14 +1725,8 @@ static void nilfs_end_folio_io(struct folio *folio, int err)
return;
}
- if (!err) {
- if (!nilfs_folio_buffers_clean(folio))
- filemap_dirty_folio(folio->mapping, folio);
- folio_clear_error(folio);
- } else {
+ if (err || !nilfs_folio_buffers_clean(folio))
filemap_dirty_folio(folio->mapping, folio);
- folio_set_error(folio);
- }
folio_end_writeback(folio);
}
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index ac24ed109ce9..e835e1f5a712 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -29,13 +29,13 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
-#include <linux/parser.h>
#include <linux/crc32.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include "nilfs.h"
#include "export.h"
#include "mdt.h"
@@ -61,7 +61,6 @@ struct kmem_cache *nilfs_segbuf_cachep;
struct kmem_cache *nilfs_btree_path_cache;
static int nilfs_setup_super(struct super_block *sb, int is_mount);
-static int nilfs_remount(struct super_block *sb, int *flags, char *data);
void __nilfs_msg(struct super_block *sb, const char *fmt, ...)
{
@@ -702,105 +701,98 @@ static const struct super_operations nilfs_sops = {
.freeze_fs = nilfs_freeze,
.unfreeze_fs = nilfs_unfreeze,
.statfs = nilfs_statfs,
- .remount_fs = nilfs_remount,
.show_options = nilfs_show_options
};
enum {
- Opt_err_cont, Opt_err_panic, Opt_err_ro,
- Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
- Opt_discard, Opt_nodiscard, Opt_err,
+ Opt_err, Opt_barrier, Opt_snapshot, Opt_order, Opt_norecovery,
+ Opt_discard,
};
-static match_table_t tokens = {
- {Opt_err_cont, "errors=continue"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_barrier, "barrier"},
- {Opt_nobarrier, "nobarrier"},
- {Opt_snapshot, "cp=%u"},
- {Opt_order, "order=%s"},
- {Opt_norecovery, "norecovery"},
- {Opt_discard, "discard"},
- {Opt_nodiscard, "nodiscard"},
- {Opt_err, NULL}
+static const struct constant_table nilfs_param_err[] = {
+ {"continue", NILFS_MOUNT_ERRORS_CONT},
+ {"panic", NILFS_MOUNT_ERRORS_PANIC},
+ {"remount-ro", NILFS_MOUNT_ERRORS_RO},
+ {}
};
-static int parse_options(char *options, struct super_block *sb, int is_remount)
-{
- struct the_nilfs *nilfs = sb->s_fs_info;
- char *p;
- substring_t args[MAX_OPT_ARGS];
-
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
+static const struct fs_parameter_spec nilfs_param_spec[] = {
+ fsparam_enum ("errors", Opt_err, nilfs_param_err),
+ fsparam_flag_no ("barrier", Opt_barrier),
+ fsparam_u64 ("cp", Opt_snapshot),
+ fsparam_string ("order", Opt_order),
+ fsparam_flag ("norecovery", Opt_norecovery),
+ fsparam_flag_no ("discard", Opt_discard),
+ {}
+};
- if (!*p)
- continue;
+struct nilfs_fs_context {
+ unsigned long ns_mount_opt;
+ __u64 cno;
+};
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_barrier:
- nilfs_set_opt(nilfs, BARRIER);
- break;
- case Opt_nobarrier:
+static int nilfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct nilfs_fs_context *nilfs = fc->fs_private;
+ int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, nilfs_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_barrier:
+ if (result.negated)
nilfs_clear_opt(nilfs, BARRIER);
- break;
- case Opt_order:
- if (strcmp(args[0].from, "relaxed") == 0)
- /* Ordered data semantics */
- nilfs_clear_opt(nilfs, STRICT_ORDER);
- else if (strcmp(args[0].from, "strict") == 0)
- /* Strict in-order semantics */
- nilfs_set_opt(nilfs, STRICT_ORDER);
- else
- return 0;
- break;
- case Opt_err_panic:
- nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_PANIC);
- break;
- case Opt_err_ro:
- nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_RO);
- break;
- case Opt_err_cont:
- nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_CONT);
- break;
- case Opt_snapshot:
- if (is_remount) {
- nilfs_err(sb,
- "\"%s\" option is invalid for remount",
- p);
- return 0;
- }
- break;
- case Opt_norecovery:
- nilfs_set_opt(nilfs, NORECOVERY);
- break;
- case Opt_discard:
- nilfs_set_opt(nilfs, DISCARD);
- break;
- case Opt_nodiscard:
- nilfs_clear_opt(nilfs, DISCARD);
- break;
- default:
- nilfs_err(sb, "unrecognized mount option \"%s\"", p);
- return 0;
+ else
+ nilfs_set_opt(nilfs, BARRIER);
+ break;
+ case Opt_order:
+ if (strcmp(param->string, "relaxed") == 0)
+ /* Ordered data semantics */
+ nilfs_clear_opt(nilfs, STRICT_ORDER);
+ else if (strcmp(param->string, "strict") == 0)
+ /* Strict in-order semantics */
+ nilfs_set_opt(nilfs, STRICT_ORDER);
+ else
+ return -EINVAL;
+ break;
+ case Opt_err:
+ nilfs->ns_mount_opt &= ~NILFS_MOUNT_ERROR_MODE;
+ nilfs->ns_mount_opt |= result.uint_32;
+ break;
+ case Opt_snapshot:
+ if (is_remount) {
+ struct super_block *sb = fc->root->d_sb;
+
+ nilfs_err(sb,
+ "\"%s\" option is invalid for remount",
+ param->key);
+ return -EINVAL;
+ }
+ if (result.uint_64 == 0) {
+ nilfs_err(NULL,
+ "invalid option \"cp=0\": invalid checkpoint number 0");
+ return -EINVAL;
}
+ nilfs->cno = result.uint_64;
+ break;
+ case Opt_norecovery:
+ nilfs_set_opt(nilfs, NORECOVERY);
+ break;
+ case Opt_discard:
+ if (result.negated)
+ nilfs_clear_opt(nilfs, DISCARD);
+ else
+ nilfs_set_opt(nilfs, DISCARD);
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
-}
-
-static inline void
-nilfs_set_default_options(struct super_block *sb,
- struct nilfs_super_block *sbp)
-{
- struct the_nilfs *nilfs = sb->s_fs_info;
- nilfs->ns_mount_opt =
- NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
+ return 0;
}
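The option surface accepted by nilfs_param_spec matches the old match_table_t exactly; for example (illustrative invocations):

    mount -t nilfs2 -o errors=remount-ro,barrier /dev/sdb1 /mnt
    mount -t nilfs2 -o ro,cp=17 /dev/sdb1 /mnt/snapshot

Note that cp= still requires a read-only mount: nilfs_parse_param() above rejects cp=0, and nilfs_get_tree() below rejects cp= without SB_RDONLY.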
static int nilfs_setup_super(struct super_block *sb, int is_mount)
@@ -857,9 +849,8 @@ struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb,
return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset);
}
-int nilfs_store_magic_and_option(struct super_block *sb,
- struct nilfs_super_block *sbp,
- char *data)
+int nilfs_store_magic(struct super_block *sb,
+ struct nilfs_super_block *sbp)
{
struct the_nilfs *nilfs = sb->s_fs_info;
@@ -870,14 +861,12 @@ int nilfs_store_magic_and_option(struct super_block *sb,
sb->s_flags |= SB_NOATIME;
#endif
- nilfs_set_default_options(sb, sbp);
-
nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid);
nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid);
nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);
- return !parse_options(data, sb, 0) ? -EINVAL : 0;
+ return 0;
}
int nilfs_check_feature_compatibility(struct super_block *sb,
@@ -1035,17 +1024,17 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
/**
* nilfs_fill_super() - initialize a super block instance
* @sb: super_block
- * @data: mount options
- * @silent: silent mode flag
+ * @fc: filesystem context
*
* This function is called exclusively under nilfs->ns_mount_mutex, so the
* recovery process is protected from other simultaneous mounts.
*/
static int
-nilfs_fill_super(struct super_block *sb, void *data, int silent)
+nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct the_nilfs *nilfs;
struct nilfs_root *fsroot;
+ struct nilfs_fs_context *ctx = fc->fs_private;
__u64 cno;
int err;
@@ -1055,10 +1044,13 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = nilfs;
- err = init_nilfs(nilfs, sb, (char *)data);
+ err = init_nilfs(nilfs, sb);
if (err)
goto failed_nilfs;
+ /* Copy in parsed mount options */
+ nilfs->ns_mount_opt = ctx->ns_mount_opt;
+
sb->s_op = &nilfs_sops;
sb->s_export_op = &nilfs_export_ops;
sb->s_root = NULL;
@@ -1117,34 +1109,25 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
return err;
}
-static int nilfs_remount(struct super_block *sb, int *flags, char *data)
+static int nilfs_reconfigure(struct fs_context *fc)
{
+ struct nilfs_fs_context *ctx = fc->fs_private;
+ struct super_block *sb = fc->root->d_sb;
struct the_nilfs *nilfs = sb->s_fs_info;
- unsigned long old_sb_flags;
- unsigned long old_mount_opt;
int err;
sync_filesystem(sb);
- old_sb_flags = sb->s_flags;
- old_mount_opt = nilfs->ns_mount_opt;
-
- if (!parse_options(data, sb, 1)) {
- err = -EINVAL;
- goto restore_opts;
- }
- sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
err = -EINVAL;
if (!nilfs_valid_fs(nilfs)) {
nilfs_warn(sb,
"couldn't remount because the filesystem is in an incomplete recovery state");
- goto restore_opts;
+ goto ignore_opts;
}
-
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
goto out;
- if (*flags & SB_RDONLY) {
+ if (fc->sb_flags & SB_RDONLY) {
sb->s_flags |= SB_RDONLY;
/*
@@ -1172,138 +1155,67 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
"couldn't remount RDWR because of unsupported optional features (%llx)",
(unsigned long long)features);
err = -EROFS;
- goto restore_opts;
+ goto ignore_opts;
}
sb->s_flags &= ~SB_RDONLY;
root = NILFS_I(d_inode(sb->s_root))->i_root;
err = nilfs_attach_log_writer(sb, root);
- if (err)
- goto restore_opts;
+ if (err) {
+ sb->s_flags |= SB_RDONLY;
+ goto ignore_opts;
+ }
down_write(&nilfs->ns_sem);
nilfs_setup_super(sb, true);
up_write(&nilfs->ns_sem);
}
out:
- return 0;
-
- restore_opts:
- sb->s_flags = old_sb_flags;
- nilfs->ns_mount_opt = old_mount_opt;
- return err;
-}
-
-struct nilfs_super_data {
- __u64 cno;
- int flags;
-};
-
-static int nilfs_parse_snapshot_option(const char *option,
- const substring_t *arg,
- struct nilfs_super_data *sd)
-{
- unsigned long long val;
- const char *msg = NULL;
- int err;
-
- if (!(sd->flags & SB_RDONLY)) {
- msg = "read-only option is not specified";
- goto parse_error;
- }
-
- err = kstrtoull(arg->from, 0, &val);
- if (err) {
- if (err == -ERANGE)
- msg = "too large checkpoint number";
- else
- msg = "malformed argument";
- goto parse_error;
- } else if (val == 0) {
- msg = "invalid checkpoint number 0";
- goto parse_error;
- }
- sd->cno = val;
- return 0;
-
-parse_error:
- nilfs_err(NULL, "invalid option \"%s\": %s", option, msg);
- return 1;
-}
-
-/**
- * nilfs_identify - pre-read mount options needed to identify mount instance
- * @data: mount options
- * @sd: nilfs_super_data
- */
-static int nilfs_identify(char *data, struct nilfs_super_data *sd)
-{
- char *p, *options = data;
- substring_t args[MAX_OPT_ARGS];
- int token;
- int ret = 0;
-
- do {
- p = strsep(&options, ",");
- if (p != NULL && *p) {
- token = match_token(p, tokens, args);
- if (token == Opt_snapshot)
- ret = nilfs_parse_snapshot_option(p, &args[0],
- sd);
- }
- if (!options)
- break;
- BUG_ON(options == data);
- *(options - 1) = ',';
- } while (!ret);
- return ret;
-}
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
+ /* Copy over parsed remount options */
+ nilfs->ns_mount_opt = ctx->ns_mount_opt;
-static int nilfs_set_bdev_super(struct super_block *s, void *data)
-{
- s->s_dev = *(dev_t *)data;
return 0;
-}
-static int nilfs_test_bdev_super(struct super_block *s, void *data)
-{
- return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
+ ignore_opts:
+ return err;
}
-static struct dentry *
-nilfs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static int
+nilfs_get_tree(struct fs_context *fc)
{
- struct nilfs_super_data sd = { .flags = flags };
+ struct nilfs_fs_context *ctx = fc->fs_private;
struct super_block *s;
dev_t dev;
int err;
- if (nilfs_identify(data, &sd))
- return ERR_PTR(-EINVAL);
+ if (ctx->cno && !(fc->sb_flags & SB_RDONLY)) {
+ nilfs_err(NULL,
+ "invalid option \"cp=%llu\": read-only option is not specified",
+ ctx->cno);
+ return -EINVAL;
+ }
- err = lookup_bdev(dev_name, &dev);
+ err = lookup_bdev(fc->source, &dev);
if (err)
- return ERR_PTR(err);
+ return err;
- s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags,
- &dev);
+ s = sget_dev(fc, dev);
if (IS_ERR(s))
- return ERR_CAST(s);
+ return PTR_ERR(s);
if (!s->s_root) {
- err = setup_bdev_super(s, flags, NULL);
+ err = setup_bdev_super(s, fc->sb_flags, fc);
if (!err)
- err = nilfs_fill_super(s, data,
- flags & SB_SILENT ? 1 : 0);
+ err = nilfs_fill_super(s, fc);
if (err)
goto failed_super;
s->s_flags |= SB_ACTIVE;
- } else if (!sd.cno) {
+ } else if (!ctx->cno) {
if (nilfs_tree_is_busy(s->s_root)) {
- if ((flags ^ s->s_flags) & SB_RDONLY) {
+ if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
nilfs_err(s,
"the device already has a %s mount.",
sb_rdonly(s) ? "read-only" : "read/write");
@@ -1312,37 +1224,75 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
}
} else {
/*
- * Try remount to setup mount states if the current
+ * Try reconfigure to set up mount states if the current
* tree is not mounted and only snapshots use this sb.
+ *
+ * Since nilfs_reconfigure() requires fc->root to be
+ * set, set it first and release it on failure.
*/
- err = nilfs_remount(s, &flags, data);
- if (err)
+ fc->root = dget(s->s_root);
+ err = nilfs_reconfigure(fc);
+ if (err) {
+ dput(fc->root);
+ fc->root = NULL; /* prevent double release */
goto failed_super;
+ }
+ return 0;
}
}
- if (sd.cno) {
+ if (ctx->cno) {
struct dentry *root_dentry;
- err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
+ err = nilfs_attach_snapshot(s, ctx->cno, &root_dentry);
if (err)
goto failed_super;
- return root_dentry;
+ fc->root = root_dentry;
+ return 0;
}
- return dget(s->s_root);
+ fc->root = dget(s->s_root);
+ return 0;
failed_super:
deactivate_locked_super(s);
- return ERR_PTR(err);
+ return err;
+}
+
+static void nilfs_free_fc(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations nilfs_context_ops = {
+ .parse_param = nilfs_parse_param,
+ .get_tree = nilfs_get_tree,
+ .reconfigure = nilfs_reconfigure,
+ .free = nilfs_free_fc,
+};
+
+static int nilfs_init_fs_context(struct fs_context *fc)
+{
+ struct nilfs_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->ns_mount_opt = NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
+ fc->fs_private = ctx;
+ fc->ops = &nilfs_context_ops;
+
+ return 0;
}
struct file_system_type nilfs_fs_type = {
.owner = THIS_MODULE,
.name = "nilfs2",
- .mount = nilfs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = nilfs_init_fs_context,
+ .parameters = nilfs_param_spec,
};
MODULE_ALIAS_FS("nilfs2");
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 2ae2c1bbf6d1..f41d7b6d432c 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -592,7 +592,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
struct nilfs_super_block **sbp = nilfs->ns_sbp;
struct buffer_head **sbh = nilfs->ns_sbh;
u64 sb2off, devsize = bdev_nr_bytes(nilfs->ns_bdev);
- int valid[2], swp = 0;
+ int valid[2], swp = 0, older;
if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
nilfs_err(sb, "device size too small");
@@ -648,9 +648,25 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
if (swp)
nilfs_swap_super_block(nilfs);
+ /*
+ * Calculate the array index of the older superblock data.
+ * If one has been dropped, set index 0 pointing to the remaining one,
+ * otherwise set index 1 pointing to the old one (including if both
+ * are the same).
+ *
+ * Case valid[0] valid[1] swp -> older
+ * -------------------------------------------------------------
+ * Both SBs are invalid 0 0 N/A (Error)
+ * SB1 is invalid 0 1 1 0
+ * SB2 is invalid 1 0 0 0
+ * SB2 is newer 1 1 1 0
+ * SB2 is older or the same 1 1 0 1
+ */
+ older = valid[1] ^ swp;
+
nilfs->ns_sbwcount = 0;
nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
- nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
+ nilfs->ns_prot_seq = le64_to_cpu(sbp[older]->s_last_seq);
*sbpp = sbp[0];
return 0;
}
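
The XOR is exactly the truth table above collapsed into one expression. A standalone check that valid[1] ^ swp reproduces the table's "older" column:

	#include <assert.h>

	int main(void)
	{
		/* { valid[1], swp, expected older } rows from the table above */
		static const int t[][3] = {
			{ 1, 1, 0 },	/* SB1 is invalid           */
			{ 0, 0, 0 },	/* SB2 is invalid           */
			{ 1, 1, 0 },	/* SB2 is newer             */
			{ 1, 0, 1 },	/* SB2 is older or the same */
		};

		for (int i = 0; i < 4; i++)
			assert((t[i][0] ^ t[i][1]) == t[i][2]);
		return 0;
	}
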
@@ -659,7 +675,6 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
* init_nilfs - initialize a NILFS instance.
* @nilfs: the_nilfs structure
* @sb: super block
- * @data: mount options
*
* init_nilfs() performs common initialization per block device (e.g.
* reading the super block, getting disk layout information, initializing
@@ -668,7 +683,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
* Return Value: On success, 0 is returned. On error, a negative error
* code is returned.
*/
-int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
struct nilfs_super_block *sbp;
int blocksize;
@@ -686,7 +701,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
if (err)
goto out;
- err = nilfs_store_magic_and_option(sb, sbp, data);
+ err = nilfs_store_magic(sb, sbp);
if (err)
goto failed_sbh;
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index cd4ae1b8ae16..85da0629415d 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -219,10 +219,6 @@ THE_NILFS_FNS(PURGING, purging)
#define nilfs_set_opt(nilfs, opt) \
((nilfs)->ns_mount_opt |= NILFS_MOUNT_##opt)
#define nilfs_test_opt(nilfs, opt) ((nilfs)->ns_mount_opt & NILFS_MOUNT_##opt)
-#define nilfs_write_opt(nilfs, mask, opt) \
- ((nilfs)->ns_mount_opt = \
- (((nilfs)->ns_mount_opt & ~NILFS_MOUNT_##mask) | \
- NILFS_MOUNT_##opt)) \
/**
* struct nilfs_root - nilfs root object
@@ -276,7 +272,7 @@ static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs)
void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
struct the_nilfs *alloc_nilfs(struct super_block *sb);
void destroy_nilfs(struct the_nilfs *nilfs);
-int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data);
+int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs);
void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs);
diff --git a/fs/ntfs3/Kconfig b/fs/ntfs3/Kconfig
index cdfdf51e55d7..7bc31d69f680 100644
--- a/fs/ntfs3/Kconfig
+++ b/fs/ntfs3/Kconfig
@@ -46,3 +46,12 @@ config NTFS3_FS_POSIX_ACL
NOTE: this is a Linux-only feature. Windows will ignore these ACLs.
If you don't know what Access Control Lists are, say N.
+
+config NTFS_FS
+ tristate "NTFS file system support"
+ select NTFS3_FS
+ select BUFFER_HEAD
+ select NLS
+ help
+ This config option is here only for backward compatibility. The NTFS
+ filesystem is now handled by the NTFS3 driver.
diff --git a/fs/ntfs3/bitmap.c b/fs/ntfs3/bitmap.c
index 845f9b22deef..c9eb01ccee51 100644
--- a/fs/ntfs3/bitmap.c
+++ b/fs/ntfs3/bitmap.c
@@ -654,7 +654,7 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
wnd->total_zeroes = nbits;
wnd->extent_max = MINUS_ONE_T;
wnd->zone_bit = wnd->zone_end = 0;
- wnd->nwnd = bytes_to_block(sb, bitmap_size(nbits));
+ wnd->nwnd = bytes_to_block(sb, ntfs3_bitmap_size(nbits));
wnd->bits_last = nbits & (wbits - 1);
if (!wnd->bits_last)
wnd->bits_last = wbits;
@@ -1347,7 +1347,7 @@ int wnd_extend(struct wnd_bitmap *wnd, size_t new_bits)
return -EINVAL;
/* Align to 8 byte boundary. */
- new_wnd = bytes_to_block(sb, bitmap_size(new_bits));
+ new_wnd = bytes_to_block(sb, ntfs3_bitmap_size(new_bits));
new_last = new_bits & (wbits - 1);
if (!new_last)
new_last = wbits;
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 5cf3d9decf64..263635199b60 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -616,4 +616,11 @@ const struct file_operations ntfs_dir_operations = {
.compat_ioctl = ntfs_compat_ioctl,
#endif
};
+
+const struct file_operations ntfs_legacy_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = ntfs_readdir,
+ .open = ntfs_file_open,
+};
// clang-format on
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 5418662c80d8..b73969e05052 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -1236,4 +1236,12 @@ const struct file_operations ntfs_file_operations = {
.fallocate = ntfs_fallocate,
.release = ntfs_file_release,
};
+
+const struct file_operations ntfs_legacy_file_operations = {
+ .llseek = generic_file_llseek,
+ .read_iter = ntfs_file_read_iter,
+ .splice_read = ntfs_file_splice_read,
+ .open = ntfs_file_open,
+ .release = ntfs_file_release,
+};
// clang-format on
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index ae2ef5c11868..626d3f2c7e2d 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -522,7 +522,7 @@ static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
ni->mi.dirty = true;
/* Step 2: Resize $MFT::BITMAP. */
- new_bitmap_bytes = bitmap_size(new_mft_total);
+ new_bitmap_bytes = ntfs3_bitmap_size(new_mft_total);
err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index daabaad63aaf..43796aaa3d97 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -1456,8 +1456,8 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
alloc->nres.valid_size = alloc->nres.data_size = cpu_to_le64(data_size);
- err = ni_insert_resident(ni, bitmap_size(1), ATTR_BITMAP, in->name,
- in->name_len, &bitmap, NULL, NULL);
+ err = ni_insert_resident(ni, ntfs3_bitmap_size(1), ATTR_BITMAP,
+ in->name, in->name_len, &bitmap, NULL, NULL);
if (err)
goto out2;
@@ -1518,8 +1518,9 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
if (bmp) {
/* Increase bitmap. */
err = attr_set_size(ni, ATTR_BITMAP, in->name, in->name_len,
- &indx->bitmap_run, bitmap_size(bit + 1),
- NULL, true, NULL);
+ &indx->bitmap_run,
+ ntfs3_bitmap_size(bit + 1), NULL, true,
+ NULL);
if (err)
goto out1;
}
@@ -2092,7 +2093,7 @@ static int indx_shrink(struct ntfs_index *indx, struct ntfs_inode *ni,
if (in->name == I30_NAME)
i_size_write(&ni->vfs_inode, new_data);
- bpb = bitmap_size(bit);
+ bpb = ntfs3_bitmap_size(bit);
if (bpb * 8 == nbits)
return 0;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index eb7a8c9fba01..d273eda1cf45 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -440,7 +440,10 @@ end_enum:
* Usually a hard links to directories are disabled.
*/
inode->i_op = &ntfs_dir_inode_operations;
- inode->i_fop = &ntfs_dir_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_dir_operations;
+ else
+ inode->i_fop = &ntfs_dir_operations;
ni->i_valid = 0;
} else if (S_ISLNK(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
@@ -450,7 +453,10 @@ end_enum:
} else if (S_ISREG(mode)) {
ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
inode->i_op = &ntfs_file_inode_operations;
- inode->i_fop = &ntfs_file_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_file_operations;
+ else
+ inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
&ntfs_aops;
if (ino != MFT_REC_MFT)
@@ -1614,7 +1620,10 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
if (S_ISDIR(mode)) {
inode->i_op = &ntfs_dir_inode_operations;
- inode->i_fop = &ntfs_dir_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_dir_operations;
+ else
+ inode->i_fop = &ntfs_dir_operations;
} else if (S_ISLNK(mode)) {
inode->i_op = &ntfs_link_inode_operations;
inode->i_fop = NULL;
@@ -1623,7 +1632,10 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
inode_nohighmem(inode);
} else if (S_ISREG(mode)) {
inode->i_op = &ntfs_file_inode_operations;
- inode->i_fop = &ntfs_file_operations;
+ if (is_legacy_ntfs(inode->i_sb))
+ inode->i_fop = &ntfs_legacy_file_operations;
+ else
+ inode->i_fop = &ntfs_file_operations;
inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
&ntfs_aops;
init_rwsem(&ni->file.run_lock);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 79356fd29a14..c018fad4c037 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -493,6 +493,7 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
struct ntfs_fnd *fnd);
bool dir_is_empty(struct inode *dir);
extern const struct file_operations ntfs_dir_operations;
+extern const struct file_operations ntfs_legacy_dir_operations;
/* Globals from file.c */
int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
@@ -507,6 +508,7 @@ long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg);
extern const struct inode_operations ntfs_special_inode_operations;
extern const struct inode_operations ntfs_file_inode_operations;
extern const struct file_operations ntfs_file_operations;
+extern const struct file_operations ntfs_legacy_file_operations;
/* Globals from frecord.c */
void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
@@ -966,9 +968,9 @@ static inline bool run_is_empty(struct runs_tree *run)
}
/* NTFS uses quad aligned bitmaps. */
-static inline size_t bitmap_size(size_t bits)
+static inline size_t ntfs3_bitmap_size(size_t bits)
{
- return ALIGN((bits + 7) >> 3, 8);
+ return BITS_TO_U64(bits) * sizeof(u64);
}
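
Both the old and the new body round the bit count up to whole 64-bit words; the rename just frees the generic bitmap_size() name. A standalone check of the equivalence, assuming BITS_TO_U64() is the usual DIV_ROUND_UP(bits, 64):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	#define ALIGN8(x)	  (((x) + 7) & ~(size_t)7)
	#define BITS_TO_U64(bits) (((bits) + 63) / 64)	/* DIV_ROUND_UP */

	int main(void)
	{
		for (size_t bits = 0; bits <= 4096; bits++) {
			size_t old_size = ALIGN8((bits + 7) >> 3); /* old body */
			size_t new_size = BITS_TO_U64(bits) * sizeof(uint64_t);

			assert(old_size == new_size);
		}
		return 0;
	}
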
#define _100ns2seconds 10000000
@@ -1154,4 +1156,6 @@ static inline void le64_sub_cpu(__le64 *var, u64 val)
*var = cpu_to_le64(le64_to_cpu(*var) - val);
}
+bool is_legacy_ntfs(struct super_block *sb);
+
#endif /* _LINUX_NTFS3_NTFS_FS_H */
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 9df7c20d066f..f41e01c5676a 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -408,6 +408,12 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
struct ntfs_mount_options *new_opts = fc->fs_private;
int ro_rw;
+ /* If ntfs3 is used as legacy ntfs, enforce read-only mode. */
+ if (is_legacy_ntfs(sb)) {
+ fc->sb_flags |= SB_RDONLY;
+ goto out;
+ }
+
ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
errorf(fc,
@@ -427,8 +433,6 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
fc,
"ntfs3: Cannot use different iocharset when remounting!");
- sync_filesystem(sb);
-
if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
!new_opts->force) {
errorf(fc,
@@ -436,6 +440,8 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
return -EINVAL;
}
+out:
+ sync_filesystem(sb);
swap(sbi->options, fc->fs_private);
return 0;
@@ -1341,7 +1347,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
/* Check bitmap boundary. */
tt = sbi->used.bitmap.nbits;
- if (inode->i_size < bitmap_size(tt)) {
+ if (inode->i_size < ntfs3_bitmap_size(tt)) {
ntfs_err(sb, "$Bitmap is corrupted.");
err = -EINVAL;
goto put_inode_out;
@@ -1613,6 +1619,8 @@ load_root:
}
#endif
+ if (is_legacy_ntfs(sb))
+ sb->s_flags |= SB_RDONLY;
return 0;
put_inode_out:
@@ -1730,7 +1738,7 @@ static const struct fs_context_operations ntfs_context_ops = {
* This will be called on mount/remount. We will first initialize
* options so that on remount we can simply reuse them.
*/
-static int ntfs_init_fs_context(struct fs_context *fc)
+static int __ntfs_init_fs_context(struct fs_context *fc)
{
struct ntfs_mount_options *opts;
struct ntfs_sb_info *sbi;
@@ -1778,6 +1786,11 @@ free_opts:
return -ENOMEM;
}
+static int ntfs_init_fs_context(struct fs_context *fc)
+{
+ return __ntfs_init_fs_context(fc);
+}
+
static void ntfs3_kill_sb(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
@@ -1798,6 +1811,50 @@ static struct file_system_type ntfs_fs_type = {
.kill_sb = ntfs3_kill_sb,
.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
+
+#if IS_ENABLED(CONFIG_NTFS_FS)
+static int ntfs_legacy_init_fs_context(struct fs_context *fc)
+{
+ int ret;
+
+ ret = __ntfs_init_fs_context(fc);
+ /* If ntfs3 is used as legacy ntfs, enforce read-only mode. */
+ fc->sb_flags |= SB_RDONLY;
+ return ret;
+}
+
+static struct file_system_type ntfs_legacy_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ntfs",
+ .init_fs_context = ntfs_legacy_init_fs_context,
+ .parameters = ntfs_fs_parameters,
+ .kill_sb = ntfs3_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+};
+MODULE_ALIAS_FS("ntfs");
+
+static inline void register_as_ntfs_legacy(void)
+{
+ int err = register_filesystem(&ntfs_legacy_fs_type);
+ if (err)
+ pr_warn("ntfs3: Failed to register legacy ntfs filesystem driver: %d\n", err);
+}
+
+static inline void unregister_as_ntfs_legacy(void)
+{
+ unregister_filesystem(&ntfs_legacy_fs_type);
+}
+
+bool is_legacy_ntfs(struct super_block *sb)
+{
+ return sb->s_type == &ntfs_legacy_fs_type;
+}
+#else
+static inline void register_as_ntfs_legacy(void) {}
+static inline void unregister_as_ntfs_legacy(void) {}
+bool is_legacy_ntfs(struct super_block *sb) { return false; }
+#endif
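
Both registered types share the ntfs3 callbacks, so the only record of which name a superblock was mounted under is its s_type pointer — which is what is_legacy_ntfs() compares. A reduced sketch of the pattern with hypothetical "foo" names:

	static int foo_init_fs_context(struct fs_context *fc);	/* shared impl */

	static struct file_system_type foo_legacy_fs_type;

	static int foo_legacy_init_fs_context(struct fs_context *fc)
	{
		int ret = foo_init_fs_context(fc);

		fc->sb_flags |= SB_RDONLY;	/* legacy name mounts read-only */
		return ret;
	}

	static bool is_legacy_foo(struct super_block *sb)
	{
		/* Pointer identity on the registered type, not a string compare. */
		return sb->s_type == &foo_legacy_fs_type;
	}
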
+
// clang-format on
static int __init init_ntfs_fs(void)
@@ -1832,6 +1889,7 @@ static int __init init_ntfs_fs(void)
goto out1;
}
+ register_as_ntfs_legacy();
err = register_filesystem(&ntfs_fs_type);
if (err)
goto out;
@@ -1849,6 +1907,7 @@ static void __exit exit_ntfs_fs(void)
rcu_barrier();
kmem_cache_destroy(ntfs_inode_cachep);
unregister_filesystem(&ntfs_fs_type);
+ unregister_as_ntfs_legacy();
ntfs3_exit_bitmap();
#ifdef CONFIG_PROC_FS
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index b82185075de7..f0467d3b3c88 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2283,8 +2283,6 @@ unlock:
ocfs2_inode_unlock(inode, 1);
brelse(di_bh);
out:
- if (ret < 0)
- ret = -EIO;
return ret;
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 960080753d3b..2b8fa3e782fb 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1784,6 +1784,9 @@ static int o2net_accept_one(struct socket *sock, int *more)
struct o2nm_node *node = NULL;
struct o2nm_node *local_node = NULL;
struct o2net_sock_container *sc = NULL;
+ struct proto_accept_arg arg = {
+ .flags = O_NONBLOCK,
+ };
struct o2net_node *nn;
unsigned int nofs_flag;
@@ -1802,7 +1805,7 @@ static int o2net_accept_one(struct socket *sock, int *more)
new_sock->type = sock->type;
new_sock->ops = sock->ops;
- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK, false);
+ ret = sock->ops->accept(sock, new_sock, &arg);
if (ret < 0)
goto out;
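
The accept() socket op now takes a struct proto_accept_arg rather than a flags integer and the old bool kern parameter. A minimal sketch of the converted call, assuming only the .flags field visible in this hunk:

	static int accept_nonblocking(struct socket *sock, struct socket *new_sock)
	{
		struct proto_accept_arg arg = {
			.flags = O_NONBLOCK,	/* replaces the old int flags */
		};

		/*
		 * The old 'bool kern' parameter is gone; any further state
		 * travels in the argument structure (other fields are not
		 * visible in this hunk).
		 */
		return sock->ops->accept(sock, new_sock, &arg);
	}
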
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 5c04dde99981..2018501b2249 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1274,7 +1274,7 @@ static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
{
struct dlm_query_nodeinfo *qn;
struct dlm_ctxt *dlm = NULL;
- int locked = 0, status = -EINVAL;
+ int status = -EINVAL;
qn = (struct dlm_query_nodeinfo *) msg->buf;
@@ -1290,12 +1290,11 @@ static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
}
spin_lock(&dlm->spinlock);
- locked = 1;
if (dlm->joining_node != qn->qn_nodenum) {
mlog(ML_ERROR, "Node %d queried nodes on domain %s but "
"joining node is %d\n", qn->qn_nodenum, qn->qn_domain,
dlm->joining_node);
- goto bail;
+ goto unlock;
}
/* Support for node query was added in 1.1 */
@@ -1305,14 +1304,14 @@ static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
"but active dlm protocol is %d.%d\n", qn->qn_nodenum,
qn->qn_domain, dlm->dlm_locking_proto.pv_major,
dlm->dlm_locking_proto.pv_minor);
- goto bail;
+ goto unlock;
}
status = dlm_match_nodes(dlm, qn);
+unlock:
+ spin_unlock(&dlm->spinlock);
bail:
- if (locked)
- spin_unlock(&dlm->spinlock);
spin_unlock(&dlm_domain_lock);
return status;
@@ -1528,7 +1527,6 @@ static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
{
int status, node, live;
- status = 0;
node = -1;
while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
node + 1)) < O2NM_MAX_NODES) {
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index b8b6a191b5cb..96b684763b39 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -255,9 +255,9 @@ static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb,
if (fh_len < 3 || fh_type > 2)
return NULL;
- handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32;
- handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]);
- handle.ih_generation = le32_to_cpu(fid->raw[2]);
+ handle.ih_blkno = (u64)le32_to_cpu((__force __le32)fid->raw[0]) << 32;
+ handle.ih_blkno |= (u64)le32_to_cpu((__force __le32)fid->raw[1]);
+ handle.ih_generation = le32_to_cpu((__force __le32)fid->raw[2]);
return ocfs2_get_dentry(sb, &handle);
}
@@ -269,9 +269,9 @@ static struct dentry *ocfs2_fh_to_parent(struct super_block *sb,
if (fh_type != 2 || fh_len < 6)
return NULL;
- parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32;
- parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]);
- parent.ih_generation = le32_to_cpu(fid->raw[5]);
+ parent.ih_blkno = (u64)le32_to_cpu((__force __le32)fid->raw[3]) << 32;
+ parent.ih_blkno |= (u64)le32_to_cpu((__force __le32)fid->raw[4]);
+ parent.ih_generation = le32_to_cpu((__force __le32)fid->raw[5]);
return ocfs2_get_dentry(sb, &parent);
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 0da8e7bd3261..ccc57038a977 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1936,6 +1936,8 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
inode_lock(inode);
+ /* Wait for all existing dio workers; newcomers will block on i_rwsem */
+ inode_dio_wait(inode);
/*
* This prevents concurrent writes on other nodes
*/
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 999111bfc271..2cc5c99fe941 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -1621,6 +1621,7 @@ static struct super_block *ocfs2_inode_cache_get_super(struct ocfs2_caching_info
}
static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci)
+__acquires(&oi->ip_lock)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
@@ -1628,6 +1629,7 @@ static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci)
}
static void ocfs2_inode_cache_unlock(struct ocfs2_caching_info *ci)
+__releases(&oi->ip_lock)
{
struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index b1550ba73f96..71beef7f8a60 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -125,6 +125,7 @@ int ocfs2_fileattr_set(struct mnt_idmap *idmap,
ocfs2_inode->ip_attr = flags;
ocfs2_set_inode_flags(inode);
+ inode_set_ctime_current(inode);
status = ocfs2_mark_inode_dirty(handle, inode, bh);
if (status < 0)
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index c803c10dd97e..5df34561c551 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -212,14 +212,15 @@ static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
unsigned int num_clusters)
{
- spin_lock(&osb->osb_lock);
- if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
- osb->local_alloc_state == OCFS2_LA_THROTTLED)
- if (num_clusters >= osb->local_alloc_default_bits) {
+ if (num_clusters >= osb->local_alloc_default_bits) {
+ spin_lock(&osb->osb_lock);
+ if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
+ osb->local_alloc_state == OCFS2_LA_THROTTLED) {
cancel_delayed_work(&osb->la_enable_wq);
osb->local_alloc_state = OCFS2_LA_ENABLED;
}
- spin_unlock(&osb->osb_lock);
+ spin_unlock(&osb->osb_lock);
+ }
}
void ocfs2_la_enable_worker(struct work_struct *work)
@@ -335,7 +336,7 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
"found = %u, set = %u, taken = %u, off = %u\n",
num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
le32_to_cpu(alloc->id1.bitmap1.i_total),
- OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
+ le32_to_cpu(OCFS2_LOCAL_ALLOC(alloc)->la_bm_off));
status = -EINVAL;
goto bail;
@@ -863,14 +864,8 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
numfound = bitoff = startoff = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
- while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
- if (bitoff == left) {
- /* mlog(0, "bitoff (%d) == left", bitoff); */
- break;
- }
- /* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
- "numfound = %d\n", bitoff, startoff, numfound);*/
-
+ while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) <
+ left) {
/* Ok, we found a zero bit... is it contig. or do we
* start over?*/
if (bitoff == startoff) {
@@ -976,9 +971,9 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
start = count = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
- while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
- != -1) {
- if ((bit_off < left) && (bit_off == start)) {
+ while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) <
+ left) {
+ if (bit_off == start) {
count++;
start++;
continue;
@@ -1002,8 +997,7 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
goto bail;
}
}
- if (bit_off >= left)
- break;
+
count = 1;
start = bit_off + 1;
}
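
These loops previously tested for a -1 sentinel; ocfs2_find_next_zero_bit() follows find_next_zero_bit() semantics and returns a value >= the bitmap size when no zero bit remains, so the loop condition bounds the result instead. A standalone rendering of the idiom, with scan_zeros() as a stand-in for the ocfs2 helper:

	#include <stdio.h>

	/* Stand-in with find_next_zero_bit() semantics: returns size if none. */
	static unsigned long scan_zeros(const unsigned char *map,
					unsigned long size, unsigned long start)
	{
		for (unsigned long i = start; i < size; i++)
			if (!(map[i / 8] & (1u << (i % 8))))
				return i;
		return size;
	}

	int main(void)
	{
		unsigned char map[] = { 0xF5 };	/* zero bits at positions 1 and 3 */
		unsigned long off, start = 0;

		/* New idiom: compare against the size, not against -1. */
		while ((off = scan_zeros(map, 8, start)) < 8) {
			printf("free bit at %lu\n", off);  /* prints 1, then 3 */
			start = off + 1;
		}
		return 0;
	}
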
@@ -1220,7 +1214,7 @@ retry_enospc:
OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);
trace_ocfs2_local_alloc_new_window_result(
- OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
+ le32_to_cpu(OCFS2_LOCAL_ALLOC(alloc)->la_bm_off),
le32_to_cpu(alloc->id1.bitmap1.i_total));
bail:
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 1f9ed117e78b..f9d6a4f9ca92 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -685,7 +685,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
}
ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
- goal_bit, len);
+ goal_bit, len, 0, 0);
if (ret) {
ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
le16_to_cpu(gd->bg_chain));
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 9221a33f917b..4d1ea8703fcd 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -566,7 +566,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
fe->i_last_eb_blk = 0;
strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
- ktime_get_real_ts64(&ts);
+ ktime_get_coarse_real_ts64(&ts);
fe->i_atime = fe->i_ctime = fe->i_mtime =
cpu_to_le64(ts.tv_sec);
fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
@@ -797,6 +797,7 @@ static int ocfs2_link(struct dentry *old_dentry,
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, fe_bh);
err = ocfs2_add_entry(handle, dentry, inode,
@@ -993,6 +994,7 @@ static int ocfs2_unlink(struct inode *dir,
drop_nlink(inode);
drop_nlink(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
+ ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, fe_bh);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 7aebdbf5cc0a..c93689b568fe 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -883,7 +883,8 @@ struct ocfs2_group_desc
__le16 bg_free_bits_count; /* Free bits count */
__le16 bg_chain; /* What chain I am in. */
/*10*/ __le32 bg_generation;
- __le32 bg_reserved1;
+ __le16 bg_contig_free_bits; /* max contig free bits length */
+ __le16 bg_reserved1;
__le64 bg_next_group; /* Next group in my list, in
blocks */
/*20*/ __le64 bg_parent_dinode; /* dinode which owns me, in
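
Carving two __le16 fields out of the reserved __le32 keeps the descriptor size and all later offsets unchanged, and old images that wrote the reserved field as zero are read back as bg_contig_free_bits == 0, i.e. "not yet computed" (hence the "may be uninitialized" re-check further down). A standalone layout check with plain integer types standing in for the little-endian __le types:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct old_tail { uint32_t bg_generation; uint32_t bg_reserved1; };
	struct new_tail {
		uint32_t bg_generation;
		uint16_t bg_contig_free_bits;	/* max contig free bits length */
		uint16_t bg_reserved1;
	};

	int main(void)
	{
		assert(sizeof(struct old_tail) == sizeof(struct new_tail));
		assert(offsetof(struct new_tail, bg_contig_free_bits) ==
		       offsetof(struct old_tail, bg_reserved1));
		return 0;
	}
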
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 3f80a56d0d60..1f303b1adf1a 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -630,7 +630,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
rb->rf_records.rl_count =
cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
spin_lock(&osb->osb_lock);
- rb->rf_generation = osb->s_next_generation++;
+ rb->rf_generation = cpu_to_le32(osb->s_next_generation++);
spin_unlock(&osb->osb_lock);
ocfs2_journal_dirty(handle, new_bh);
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index a9d1296d736d..1fe61974d9f0 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -414,7 +414,7 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
start = search_start;
while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
- start)) != -1) {
+ start)) < resmap->m_bitmap_len) {
/* Search reached end of the region */
if (offset >= (search_start + search_len))
break;
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index d65d43c61857..c4a4016d3866 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -91,6 +91,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
u16 old_bg_clusters;
+ u16 contig_bits;
+ __le16 old_bg_contig_free_bits;
trace_ocfs2_update_last_group_and_inode(new_clusters,
first_new_cluster);
@@ -122,6 +124,11 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
}
+ contig_bits = ocfs2_find_max_contig_free_bits(group->bg_bitmap,
+ le16_to_cpu(group->bg_bits), 0);
+ old_bg_contig_free_bits = group->bg_contig_free_bits;
+ group->bg_contig_free_bits = cpu_to_le16(contig_bits);
+
ocfs2_journal_dirty(handle, group_bh);
/* update the inode accordingly. */
@@ -160,6 +167,7 @@ out_rollback:
le16_add_cpu(&group->bg_free_bits_count, backups);
le16_add_cpu(&group->bg_bits, -1 * num_bits);
le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
+ group->bg_contig_free_bits = old_bg_contig_free_bits;
}
out:
if (ret)
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 166c8918c825..f7b483f0de2a 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -50,6 +50,10 @@ struct ocfs2_suballoc_result {
u64 sr_blkno; /* The first allocated block */
unsigned int sr_bit_offset; /* The bit in the bg */
unsigned int sr_bits; /* How many bits we claimed */
+ unsigned int sr_max_contig_bits; /* Max contiguous free bits
+ * length; only tracked for
+ * cluster groups
+ */
};
static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
@@ -1272,6 +1276,26 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
return ret;
}
+u16 ocfs2_find_max_contig_free_bits(void *bitmap,
+ u16 total_bits, u16 start)
+{
+ u16 offset, free_bits;
+ u16 contig_bits = 0;
+
+ while (start < total_bits) {
+ offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start);
+ if (offset == total_bits)
+ break;
+
+ start = ocfs2_find_next_bit(bitmap, total_bits, offset);
+ free_bits = start - offset;
+ if (contig_bits < free_bits)
+ contig_bits = free_bits;
+ }
+
+ return contig_bits;
+}
+
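
The helper alternates between the next zero bit and the next set bit, keeping the longest zero run seen. A standalone transcription with the two kernel find helpers stubbed in — bit numbering is assumed to match the kernel's little-endian bitmap ops:

	#include <assert.h>
	#include <stdint.h>

	/* Stand-in for ocfs2_find_next_{zero_,}bit(): returns total if none. */
	static uint16_t find_next(const uint8_t *map, uint16_t total,
				  uint16_t start, int want_set)
	{
		for (uint16_t i = start; i < total; i++)
			if (((map[i / 8] >> (i % 8)) & 1) == want_set)
				return i;
		return total;
	}

	static uint16_t max_contig_free_bits(const uint8_t *map, uint16_t total,
					     uint16_t start)
	{
		uint16_t offset, free_bits, contig_bits = 0;

		while (start < total) {
			offset = find_next(map, total, start, 0); /* next free */
			if (offset == total)
				break;
			start = find_next(map, total, offset, 1); /* run end */
			free_bits = start - offset;
			if (contig_bits < free_bits)
				contig_bits = free_bits;
		}
		return contig_bits;
	}

	int main(void)
	{
		/* bits 0..15: 1 0 0 1 0 0 0 1  1 1 0 0 0 0 1 1 -> runs 2, 3, 4 */
		const uint8_t map[2] = { 0x89, 0xC3 };

		assert(max_contig_free_bits(map, 16, 0) == 4);
		return 0;
	}
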
static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
struct buffer_head *bg_bh,
unsigned int bits_wanted,
@@ -1280,6 +1304,7 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
{
void *bitmap;
u16 best_offset, best_size;
+ u16 prev_best_size = 0;
int offset, start, found, status = 0;
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
@@ -1290,10 +1315,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
found = start = best_offset = best_size = 0;
bitmap = bg->bg_bitmap;
- while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
- if (offset == total_bits)
- break;
-
+ while ((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) <
+ total_bits) {
if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
/* We found a zero, but we can't use it as it
* hasn't been put to disk yet! */
@@ -1308,6 +1331,7 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
/* got a zero after some ones */
found = 1;
start = offset + 1;
+ prev_best_size = best_size;
}
if (found > best_size) {
best_size = found;
@@ -1320,6 +1344,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
}
}
+ /* best_size is about to be allocated, so save prev_best_size */
+ res->sr_max_contig_bits = prev_best_size;
if (best_size) {
res->sr_bit_offset = best_offset;
res->sr_bits = best_size;
@@ -1337,11 +1363,16 @@ int ocfs2_block_group_set_bits(handle_t *handle,
struct ocfs2_group_desc *bg,
struct buffer_head *group_bh,
unsigned int bit_off,
- unsigned int num_bits)
+ unsigned int num_bits,
+ unsigned int max_contig_bits,
+ int fastpath)
{
int status;
void *bitmap = bg->bg_bitmap;
int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
+ unsigned int start = bit_off + num_bits;
+ u16 contig_bits;
+ struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
/* All callers get the descriptor via
* ocfs2_read_group_descriptor(). Any corruption is a code bug. */
@@ -1373,6 +1404,29 @@ int ocfs2_block_group_set_bits(handle_t *handle,
while(num_bits--)
ocfs2_set_bit(bit_off++, bitmap);
+ /*
+ * This is the fast path: the caller passes the old contig value
+ * in max_contig_bits to bypass the rescan.
+ */
+ if (fastpath) {
+ bg->bg_contig_free_bits = cpu_to_le16(max_contig_bits);
+ } else if (ocfs2_is_cluster_bitmap(alloc_inode)) {
+ /*
+ * Usually, the block group bitmap allocates only 1 bit
+ * at a time, while the cluster group allocates n bits
+ * each time. Therefore, we only save the contig bits for
+ * the cluster group.
+ */
+ contig_bits = ocfs2_find_max_contig_free_bits(bitmap,
+ le16_to_cpu(bg->bg_bits), start);
+ if (contig_bits > max_contig_bits)
+ max_contig_bits = contig_bits;
+ bg->bg_contig_free_bits = cpu_to_le16(max_contig_bits);
+ ocfs2_local_alloc_seen_free_bits(osb, max_contig_bits);
+ } else {
+ bg->bg_contig_free_bits = 0;
+ }
+
ocfs2_journal_dirty(handle, group_bh);
bail:
@@ -1486,7 +1540,12 @@ static int ocfs2_cluster_group_search(struct inode *inode,
BUG_ON(!ocfs2_is_cluster_bitmap(inode));
- if (gd->bg_free_bits_count) {
+ if (le16_to_cpu(gd->bg_contig_free_bits) &&
+ le16_to_cpu(gd->bg_contig_free_bits) < bits_wanted)
+ return -ENOSPC;
+
+ /* ->bg_contig_free_bits may be uninitialized, so compare again */
+ if (le16_to_cpu(gd->bg_free_bits_count) >= bits_wanted) {
max_bits = le16_to_cpu(gd->bg_bits);
/* Tail groups in cluster bitmaps which aren't cpg
@@ -1530,13 +1589,6 @@ static int ocfs2_cluster_group_search(struct inode *inode,
* of bits. */
if (min_bits <= res->sr_bits)
search = 0; /* success */
- else if (res->sr_bits) {
- /*
- * Don't show bits which we'll be returning
- * for allocation to the local alloc bitmap.
- */
- ocfs2_local_alloc_seen_free_bits(osb, res->sr_bits);
- }
}
return search;
@@ -1555,7 +1607,7 @@ static int ocfs2_block_group_search(struct inode *inode,
BUG_ON(min_bits != 1);
BUG_ON(ocfs2_is_cluster_bitmap(inode));
- if (bg->bg_free_bits_count) {
+ if (le16_to_cpu(bg->bg_free_bits_count) >= bits_wanted) {
ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
group_bh, bits_wanted,
le16_to_cpu(bg->bg_bits),
@@ -1715,7 +1767,8 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
}
ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
- res->sr_bit_offset, res->sr_bits);
+ res->sr_bit_offset, res->sr_bits,
+ res->sr_max_contig_bits, 0);
if (ret < 0) {
ocfs2_rollback_alloc_dinode_counts(alloc_inode, ac->ac_bh,
res->sr_bits,
@@ -1849,7 +1902,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
bg,
group_bh,
res->sr_bit_offset,
- res->sr_bits);
+ res->sr_bits,
+ res->sr_max_contig_bits,
+ 0);
if (status < 0) {
ocfs2_rollback_alloc_dinode_counts(alloc_inode,
ac->ac_bh, res->sr_bits, chain);
@@ -1951,7 +2006,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
if (i == victim)
continue;
- if (!cl->cl_recs[i].c_free)
+ if (le32_to_cpu(cl->cl_recs[i].c_free) < bits_wanted)
continue;
ac->ac_chain = i;
@@ -2163,7 +2218,9 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
bg,
bg_bh,
res->sr_bit_offset,
- res->sr_bits);
+ res->sr_bits,
+ res->sr_max_contig_bits,
+ 0);
if (ret < 0) {
ocfs2_rollback_alloc_dinode_counts(ac->ac_inode,
ac->ac_bh, res->sr_bits, chain);
@@ -2382,11 +2439,13 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
struct buffer_head *group_bh,
unsigned int bit_off,
unsigned int num_bits,
+ unsigned int max_contig_bits,
void (*undo_fn)(unsigned int bit,
unsigned long *bmap))
{
int status;
unsigned int tmp;
+ u16 contig_bits;
struct ocfs2_group_desc *undo_bg = NULL;
struct journal_head *jh;
@@ -2433,6 +2492,20 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
num_bits);
}
+ /*
+ * TODO: even for 'num_bits == 1' (the worst case, releasing one
+ * cluster), we still need to rescan the whole bitmap.
+ */
+ if (ocfs2_is_cluster_bitmap(alloc_inode)) {
+ contig_bits = ocfs2_find_max_contig_free_bits(bg->bg_bitmap,
+ le16_to_cpu(bg->bg_bits), 0);
+ if (contig_bits > max_contig_bits)
+ max_contig_bits = contig_bits;
+ bg->bg_contig_free_bits = cpu_to_le16(max_contig_bits);
+ } else {
+ bg->bg_contig_free_bits = 0;
+ }
+
if (undo_fn)
spin_unlock(&jh->b_state_lock);
@@ -2459,6 +2532,7 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
struct ocfs2_chain_list *cl = &fe->id2.i_chain;
struct buffer_head *group_bh = NULL;
struct ocfs2_group_desc *group;
+ __le16 old_bg_contig_free_bits = 0;
/* The alloc_bh comes from ocfs2_free_dinode() or
* ocfs2_free_clusters(). The callers have all locked the
@@ -2483,9 +2557,11 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
+ if (ocfs2_is_cluster_bitmap(alloc_inode))
+ old_bg_contig_free_bits = group->bg_contig_free_bits;
status = ocfs2_block_group_clear_bits(handle, alloc_inode,
group, group_bh,
- start_bit, count, undo_fn);
+ start_bit, count, 0, undo_fn);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -2496,7 +2572,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
if (status < 0) {
mlog_errno(status);
ocfs2_block_group_set_bits(handle, alloc_inode, group, group_bh,
- start_bit, count);
+ start_bit, count,
+ le16_to_cpu(old_bg_contig_free_bits), 1);
goto bail;
}
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index 9c74eace3adc..b481b834857d 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -79,12 +79,16 @@ void ocfs2_rollback_alloc_dinode_counts(struct inode *inode,
struct buffer_head *di_bh,
u32 num_bits,
u16 chain);
+u16 ocfs2_find_max_contig_free_bits(void *bitmap,
+ u16 total_bits, u16 start);
int ocfs2_block_group_set_bits(handle_t *handle,
struct inode *alloc_inode,
struct ocfs2_group_desc *bg,
struct buffer_head *group_bh,
unsigned int bit_off,
- unsigned int num_bits);
+ unsigned int num_bits,
+ unsigned int max_contig_bits,
+ int fastpath);
int ocfs2_claim_metadata(handle_t *handle,
struct ocfs2_alloc_context *ac,
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 4a0779e3ef79..a7b527ea50d3 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -355,10 +355,10 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
return inode;
}
-static int openprom_remount(struct super_block *sb, int *flags, char *data)
+static int openpromfs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- *flags |= SB_NOATIME;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_NOATIME;
return 0;
}
@@ -366,7 +366,6 @@ static const struct super_operations openprom_sops = {
.alloc_inode = openprom_alloc_inode,
.free_inode = openprom_free_inode,
.statfs = simple_statfs,
- .remount_fs = openprom_remount,
};
static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
@@ -415,6 +414,7 @@ static int openpromfs_get_tree(struct fs_context *fc)
static const struct fs_context_operations openpromfs_context_ops = {
.get_tree = openpromfs_get_tree,
+ .reconfigure = openpromfs_reconfigure,
};
static int openpromfs_init_fs_context(struct fs_context *fc)
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index 8bbe9486e3a6..395a00ed8ac7 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -33,9 +33,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;
new_op->upcall.req.lookup.parent_refn = parent->refn;
- strncpy(new_op->upcall.req.lookup.d_name,
- dentry->d_name.name,
- ORANGEFS_NAME_MAX - 1);
+ strscpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name);
gossip_debug(GOSSIP_DCACHE_DEBUG,
"%s:%s:%d interrupt flag [%d]\n",
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index c9dfd5c6a097..200558ec72f0 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -41,8 +41,7 @@ static int orangefs_create(struct mnt_idmap *idmap,
fill_default_sys_attrs(new_op->upcall.req.create.attributes,
ORANGEFS_TYPE_METAFILE, mode);
- strncpy(new_op->upcall.req.create.d_name,
- dentry->d_name.name, ORANGEFS_NAME_MAX - 1);
+ strscpy(new_op->upcall.req.create.d_name, dentry->d_name.name);
ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
@@ -137,8 +136,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
&parent->refn.khandle);
new_op->upcall.req.lookup.parent_refn = parent->refn;
- strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name,
- ORANGEFS_NAME_MAX - 1);
+ strscpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name);
gossip_debug(GOSSIP_NAME_DEBUG,
"%s: doing lookup on %s under %pU,%d\n",
@@ -192,8 +190,7 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
return -ENOMEM;
new_op->upcall.req.remove.parent_refn = parent->refn;
- strncpy(new_op->upcall.req.remove.d_name, dentry->d_name.name,
- ORANGEFS_NAME_MAX - 1);
+ strscpy(new_op->upcall.req.remove.d_name, dentry->d_name.name);
ret = service_operation(new_op, "orangefs_unlink",
get_interruptible_flag(inode));
@@ -247,10 +244,8 @@ static int orangefs_symlink(struct mnt_idmap *idmap,
ORANGEFS_TYPE_SYMLINK,
mode);
- strncpy(new_op->upcall.req.sym.entry_name,
- dentry->d_name.name,
- ORANGEFS_NAME_MAX - 1);
- strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_MAX - 1);
+ strscpy(new_op->upcall.req.sym.entry_name, dentry->d_name.name);
+ strscpy(new_op->upcall.req.sym.target, symname);
ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
@@ -324,8 +319,7 @@ static int orangefs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
fill_default_sys_attrs(new_op->upcall.req.mkdir.attributes,
ORANGEFS_TYPE_DIRECTORY, mode);
- strncpy(new_op->upcall.req.mkdir.d_name,
- dentry->d_name.name, ORANGEFS_NAME_MAX - 1);
+ strscpy(new_op->upcall.req.mkdir.d_name, dentry->d_name.name);
ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
@@ -405,12 +399,8 @@ static int orangefs_rename(struct mnt_idmap *idmap,
new_op->upcall.req.rename.old_parent_refn = ORANGEFS_I(old_dir)->refn;
new_op->upcall.req.rename.new_parent_refn = ORANGEFS_I(new_dir)->refn;
- strncpy(new_op->upcall.req.rename.d_old_name,
- old_dentry->d_name.name,
- ORANGEFS_NAME_MAX - 1);
- strncpy(new_op->upcall.req.rename.d_new_name,
- new_dentry->d_name.name,
- ORANGEFS_NAME_MAX - 1);
+ strscpy(new_op->upcall.req.rename.d_old_name, old_dentry->d_name.name);
+ strscpy(new_op->upcall.req.rename.d_new_name, new_dentry->d_name.name);
ret = service_operation(new_op,
"orangefs_rename",
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 34849b4a3243..eba3e357192e 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -201,7 +201,8 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
(long)new_op->downcall.resp.statfs.files_avail);
buf->f_type = sb->s_magic;
- memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
+ buf->f_fsid.val[0] = ORANGEFS_SB(sb)->fs_id;
+ buf->f_fsid.val[1] = ORANGEFS_SB(sb)->id;
buf->f_bsize = new_op->downcall.resp.statfs.block_size;
buf->f_namelen = ORANGEFS_NAME_MAX;
@@ -253,9 +254,8 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT);
if (!new_op)
return -ENOMEM;
- strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
- orangefs_sb->devname,
- ORANGEFS_MAX_SERVER_ADDR_LEN);
+ strscpy(new_op->upcall.req.fs_mount.orangefs_config_server,
+ orangefs_sb->devname);
gossip_debug(GOSSIP_SUPER_DEBUG,
"Attempting ORANGEFS Remount via host %s\n",
@@ -400,8 +400,7 @@ static int orangefs_unmount(int id, __s32 fs_id, const char *devname)
return -ENOMEM;
op->upcall.req.fs_umount.id = id;
op->upcall.req.fs_umount.fs_id = fs_id;
- strncpy(op->upcall.req.fs_umount.orangefs_config_server,
- devname, ORANGEFS_MAX_SERVER_ADDR_LEN - 1);
+ strscpy(op->upcall.req.fs_umount.orangefs_config_server, devname);
r = service_operation(op, "orangefs_fs_umount", 0);
/* Not much to do about an error here. */
if (r)
@@ -494,9 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
if (!new_op)
return ERR_PTR(-ENOMEM);
- strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
- devname,
- ORANGEFS_MAX_SERVER_ADDR_LEN - 1);
+ strscpy(new_op->upcall.req.fs_mount.orangefs_config_server, devname);
gossip_debug(GOSSIP_SUPER_DEBUG,
"Attempting ORANGEFS Mount via host %s\n",
@@ -543,9 +540,8 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
* on successful mount, store the devname and data
* used
*/
- strncpy(ORANGEFS_SB(sb)->devname,
- devname,
- ORANGEFS_MAX_SERVER_ADDR_LEN - 1);
+ strscpy(ORANGEFS_SB(sb)->devname, devname);
+
/* mount_pending must be cleared */
ORANGEFS_SB(sb)->mount_pending = 0;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 0762575a1e70..a5ef2005a2cc 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -114,7 +114,7 @@ int ovl_copy_xattr(struct super_block *sb, const struct path *oldpath, struct de
if (ovl_is_private_xattr(sb, name))
continue;
- error = security_inode_copy_up_xattr(name);
+ error = security_inode_copy_up_xattr(old, name);
if (error < 0 && error != -EOPNOTSUPP)
break;
if (error == 1) {
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index 36dcc530ac28..4860fcc4611b 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -139,10 +139,6 @@ static int ovl_verity_mode_def(void)
return OVL_VERITY_OFF;
}
-#define fsparam_string_empty(NAME, OPT) \
- __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
-
-
const struct fs_parameter_spec ovl_parameter_spec[] = {
fsparam_string_empty("lowerdir", Opt_lowerdir),
fsparam_string("lowerdir+", Opt_lowerdir_add),
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index a40fc7e05525..06a231970cb5 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -1460,7 +1460,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
* lead to unexpected results.
*/
sb->s_iflags |= SB_I_NOUMASK;
- sb->s_iflags |= SB_I_EVM_UNSUPPORTED;
+ sb->s_iflags |= SB_I_EVM_HMAC_UNSUPPORTED;
err = -ENOMEM;
root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index bd08616ed8ba..7b4db9c56e6a 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -5,7 +5,7 @@
obj-y += proc.o
-CFLAGS_task_mmu.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_task_mmu.o += -Wno-override-init
proc-y := nommu.o task_nommu.o
proc-$(CONFIG_MMU) := task_mmu.o
diff --git a/fs/proc/bootconfig.c b/fs/proc/bootconfig.c
index 902b326e1e56..87dcaae32ff8 100644
--- a/fs/proc/bootconfig.c
+++ b/fs/proc/bootconfig.c
@@ -62,12 +62,12 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size)
break;
dst += ret;
}
- if (ret >= 0 && boot_command_line[0]) {
- ret = snprintf(dst, rest(dst, end), "# Parameters from bootloader:\n# %s\n",
- boot_command_line);
- if (ret > 0)
- dst += ret;
- }
+ }
+ if (cmdline_has_extra_options() && ret >= 0 && boot_command_line[0]) {
+ ret = snprintf(dst, rest(dst, end), "# Parameters from bootloader:\n# %s\n",
+ boot_command_line);
+ if (ret > 0)
+ dst += ret;
}
out:
kfree(key);
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 6e72e5ad42bc..f4b1c8b42a51 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -74,7 +74,18 @@ out:
return 0;
}
-static int proc_fdinfo_access_allowed(struct inode *inode)
+static int seq_fdinfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show, inode);
+}
+
+/**
+ * Shared /proc/pid/fdinfo and /proc/pid/fdinfo/fd permission helper to ensure
+ * that the current task has PTRACE_MODE_READ in addition to the normal
+ * POSIX-like checks.
+ */
+static int proc_fdinfo_permission(struct mnt_idmap *idmap, struct inode *inode,
+ int mask)
{
bool allowed = false;
struct task_struct *task = get_proc_task(inode);
@@ -88,18 +99,13 @@ static int proc_fdinfo_access_allowed(struct inode *inode)
if (!allowed)
return -EACCES;
- return 0;
+ return generic_permission(idmap, inode, mask);
}
-static int seq_fdinfo_open(struct inode *inode, struct file *file)
-{
- int ret = proc_fdinfo_access_allowed(inode);
-
- if (ret)
- return ret;
-
- return single_open(file, seq_show, inode);
-}
+static const struct inode_operations proc_fdinfo_file_inode_operations = {
+ .permission = proc_fdinfo_permission,
+ .setattr = proc_setattr,
+};
static const struct file_operations proc_fdinfo_file_operations = {
.open = seq_fdinfo_open,
@@ -388,6 +394,8 @@ static struct dentry *proc_fdinfo_instantiate(struct dentry *dentry,
ei = PROC_I(inode);
ei->fd = data->fd;
+ inode->i_op = &proc_fdinfo_file_inode_operations;
+
inode->i_fop = &proc_fdinfo_file_operations;
tid_fd_update_inode(task, inode, 0);
@@ -407,23 +415,13 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
proc_fdinfo_instantiate);
}
-static int proc_open_fdinfo(struct inode *inode, struct file *file)
-{
- int ret = proc_fdinfo_access_allowed(inode);
-
- if (ret)
- return ret;
-
- return 0;
-}
-
const struct inode_operations proc_fdinfo_inode_operations = {
.lookup = proc_lookupfdinfo,
+ .permission = proc_fdinfo_permission,
.setattr = proc_setattr,
};
const struct file_operations proc_fdinfo_operations = {
- .open = proc_open_fdinfo,
.read = generic_read_dir,
.iterate_shared = proc_readfdinfo,
.llseek = generic_file_llseek,
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index dcd513dccf55..d19434e2a58e 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -451,15 +451,13 @@ pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned lo
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;
+ if (pde->proc_ops->proc_get_unmapped_area)
+ return pde->proc_ops->proc_get_unmapped_area(file, orig_addr, len, pgoff, flags);
- get_area = pde->proc_ops->proc_get_unmapped_area;
#ifdef CONFIG_MMU
- if (!get_area)
- get_area = current->mm->get_unmapped_area;
+ return mm_get_unmapped_area(current->mm, file, orig_addr, len, pgoff, flags);
#endif
- if (get_area)
- return get_area(file, orig_addr, len, pgoff, flags);
+
return orig_addr;
}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 45af9a989d40..245171d9164b 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -89,8 +89,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "SwapTotal: ", i.totalswap);
show_val_kb(m, "SwapFree: ", i.freeswap);
#ifdef CONFIG_ZSWAP
- seq_printf(m, "Zswap: %8lu kB\n",
- (unsigned long)(zswap_pool_total_size >> 10));
+ show_val_kb(m, "Zswap: ", zswap_total_pages());
seq_printf(m, "Zswapped: %8lu kB\n",
(unsigned long)atomic_read(&zswap_stored_pages) <<
(PAGE_SHIFT - 10));
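
zswap_total_pages() reports pages and show_val_kb() performs the pages-to-kB conversion internally, replacing the open-coded byte shift. A standalone check that the two conversions agree, assuming 4 KiB pages:

	#include <assert.h>

	int main(void)
	{
		const unsigned long page_shift = 12;	/* 4 KiB pages assumed */
		unsigned long pages = 300;

		/* bytes >> 10 equals pages << (PAGE_SHIFT - 10) */
		assert(((pages << page_shift) >> 10) ==
		       (pages << (page_shift - 10)));
		return 0;
	}
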
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 195b077c0fac..2fb64bdb64eb 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -67,7 +67,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
*/
ppage = pfn_to_online_page(pfn);
- if (!ppage || PageSlab(ppage) || page_has_type(ppage))
+ if (!ppage)
pcount = 0;
else
pcount = page_mapcount(ppage);
@@ -107,10 +107,13 @@ static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
return ((kflags >> kbit) & 1) << ubit;
}
-u64 stable_page_flags(struct page *page)
+u64 stable_page_flags(const struct page *page)
{
- u64 k;
- u64 u;
+ const struct folio *folio;
+ unsigned long k;
+ unsigned long mapping;
+ bool is_anon;
+ u64 u = 0;
/*
* pseudo flag: KPF_NOPAGE
@@ -118,52 +121,47 @@ u64 stable_page_flags(struct page *page)
*/
if (!page)
return 1 << KPF_NOPAGE;
+ folio = page_folio(page);
- k = page->flags;
- u = 0;
+ k = folio->flags;
+ mapping = (unsigned long)folio->mapping;
+ is_anon = mapping & PAGE_MAPPING_ANON;
/*
* pseudo flags for the well known (anonymous) memory mapped pages
- *
- * Note that page->_mapcount is overloaded in SLAB, so the
- * simple test in page_mapped() is not enough.
*/
- if (!PageSlab(page) && page_mapped(page))
+ if (page_mapped(page))
u |= 1 << KPF_MMAP;
- if (PageAnon(page))
+ if (is_anon) {
u |= 1 << KPF_ANON;
- if (PageKsm(page))
- u |= 1 << KPF_KSM;
+ if (mapping & PAGE_MAPPING_KSM)
+ u |= 1 << KPF_KSM;
+ }
/*
* compound pages: export both head/tail info
* they together define a compound page's start/end pos and order
*/
- if (PageHead(page))
- u |= 1 << KPF_COMPOUND_HEAD;
- if (PageTail(page))
+ if (page == &folio->page)
+ u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
+ else
u |= 1 << KPF_COMPOUND_TAIL;
- if (PageHuge(page))
+ if (folio_test_hugetlb(folio))
u |= 1 << KPF_HUGE;
/*
- * PageTransCompound can be true for non-huge compound pages (slab
- * pages or pages allocated by drivers with __GFP_COMP) because it
- * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
+ * We need to check PageLRU/PageAnon
* to make sure a given page is a thp, not a non-huge compound page.
*/
- else if (PageTransCompound(page)) {
- struct page *head = compound_head(page);
-
- if (PageLRU(head) || PageAnon(head))
+ else if (folio_test_large(folio)) {
+ if ((k & (1 << PG_lru)) || is_anon)
u |= 1 << KPF_THP;
- else if (is_huge_zero_page(head)) {
+ else if (is_huge_zero_folio(folio)) {
u |= 1 << KPF_ZERO_PAGE;
u |= 1 << KPF_THP;
}
} else if (is_zero_pfn(page_to_pfn(page)))
u |= 1 << KPF_ZERO_PAGE;
-
/*
* Caveats on high order pages: PG_buddy and PG_slab will only be set
* on the head page.
@@ -177,16 +175,17 @@ u64 stable_page_flags(struct page *page)
u |= 1 << KPF_OFFLINE;
if (PageTable(page))
u |= 1 << KPF_PGTABLE;
+ if (folio_test_slab(folio))
+ u |= 1 << KPF_SLAB;
- if (page_is_idle(page))
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
+ u |= kpf_copy_bit(k, KPF_IDLE, PG_idle);
+#else
+ if (folio_test_idle(folio))
u |= 1 << KPF_IDLE;
+#endif
u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
-
- u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
- if (PageTail(page) && PageSlab(page))
- u |= 1 << KPF_SLAB;
-
u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
@@ -197,7 +196,8 @@ u64 stable_page_flags(struct page *page)
u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);
- if (PageSwapCache(page))
+#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
+ if ((k & SWAPCACHE) == SWAPCACHE)
u |= 1 << KPF_SWAPCACHE;
u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);
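
PG_swapcache shares its bit with other page uses, so it only means "in swap cache" when PG_swapbacked is also set — which is what the two-bit mask test encodes, matching PageSwapCache(). A standalone check of the mask idiom with illustrative bit numbers:

	#include <assert.h>

	enum { PG_swapbacked = 19, PG_swapcache = 10 };	/* illustrative */

	#define SWAPCACHE ((1UL << PG_swapbacked) | (1UL << PG_swapcache))

	int main(void)
	{
		unsigned long k = 1UL << PG_swapcache;	/* overloaded bit alone */

		assert((k & SWAPCACHE) != SWAPCACHE);	/* not in swap cache */
		k |= 1UL << PG_swapbacked;
		assert((k & SWAPCACHE) == SWAPCACHE);	/* now it is */
		return 0;
	}
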
@@ -205,7 +205,10 @@ u64 stable_page_flags(struct page *page)
u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
#ifdef CONFIG_MEMORY_FAILURE
- u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
+ if (u & (1 << KPF_HUGE))
+ u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
+ else
+ u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison);
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -231,7 +234,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
{
const unsigned long max_dump_pfn = get_max_dump_pfn();
u64 __user *out = (u64 __user *)buf;
- struct page *ppage;
unsigned long src = *ppos;
unsigned long pfn;
ssize_t ret = 0;
@@ -248,9 +250,9 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
* TODO: ZONE_DEVICE support requires to identify
* memmaps that were actually initialized.
*/
- ppage = pfn_to_online_page(pfn);
+ struct page *page = pfn_to_online_page(pfn);
- if (put_user(stable_page_flags(ppage), out)) {
+ if (put_user(stable_page_flags(page), out)) {
ret = -EFAULT;
break;
}
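
The reworked stable_page_flags() feeds /proc/kpageflags, which exports one little-endian u64 of KPF_* bits per pfn. A minimal userspace reader, as a sketch (the KPF_* numbering is the stable ABI from include/uapi/linux/kernel-page-flags.h; reading normally requires root):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	uint64_t flags;
	unsigned long pfn;
	int fd;

	if (argc < 2)
		return 1;
	pfn = strtoul(argv[1], NULL, 0);
	fd = open("/proc/kpageflags", O_RDONLY);	/* 8 bytes per pfn */
	if (fd < 0)
		return 1;
	if (pread(fd, &flags, sizeof(flags), pfn * sizeof(flags)) != sizeof(flags))
		return 1;
	printf("pfn %lu: kpageflags %#jx\n", pfn, (uintmax_t)flags);
	close(fd);
	return 0;
}
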
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 2ba31b6d68c0..52f0b75cbce2 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -135,6 +135,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_data);
* @parent: The parent directory in which to create.
* @ops: The seq_file ops with which to read the file.
* @write: The write method with which to 'modify' the file.
+ * @state_size: The size of the per-file private state to allocate.
* @data: Data for retrieval by pde_data().
*
* Create a network namespaced proc file in the @parent directory with the
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 37cde0efee57..b1c2c0b82116 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -30,9 +30,7 @@ static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
/* Support for permanently empty directories */
-static struct ctl_table sysctl_mount_point[] = {
- {.type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY }
-};
+static struct ctl_table sysctl_mount_point[] = { };
/**
* register_sysctl_mount_point() - registers a sysctl mount point
@@ -48,14 +46,12 @@ struct ctl_table_header *register_sysctl_mount_point(const char *path)
}
EXPORT_SYMBOL(register_sysctl_mount_point);
-#define sysctl_is_perm_empty_ctl_table(tptr) \
- (tptr[0].type == SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY)
#define sysctl_is_perm_empty_ctl_header(hptr) \
- (sysctl_is_perm_empty_ctl_table(hptr->ctl_table))
+ (hptr->type == SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY)
#define sysctl_set_perm_empty_ctl_header(hptr) \
- (hptr->ctl_table[0].type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY)
+ (hptr->type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY)
#define sysctl_clear_perm_empty_ctl_header(hptr) \
- (hptr->ctl_table[0].type = SYSCTL_TABLE_TYPE_DEFAULT)
+ (hptr->type = SYSCTL_TABLE_TYPE_DEFAULT)
void proc_sys_poll_notify(struct ctl_table_poll *poll)
{
@@ -210,6 +206,8 @@ static void init_header(struct ctl_table_header *head,
node++;
}
}
+ if (table == sysctl_mount_point)
+ sysctl_set_perm_empty_ctl_header(head);
}
static void erase_header(struct ctl_table_header *head)
@@ -232,8 +230,7 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header)
return -EROFS;
/* Am I creating a permanently empty directory? */
- if (header->ctl_table_size > 0 &&
- sysctl_is_perm_empty_ctl_table(header->ctl_table)) {
+ if (sysctl_is_perm_empty_ctl_header(header)) {
if (!RB_EMPTY_ROOT(&dir->root))
return -EINVAL;
sysctl_set_perm_empty_ctl_header(dir_h);
@@ -480,7 +477,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
}
if (root->set_ownership)
- root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+ root->set_ownership(head, &inode->i_uid, &inode->i_gid);
else {
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
@@ -1204,7 +1201,7 @@ static bool get_links(struct ctl_dir *dir,
struct ctl_table *entry, *link;
if (header->ctl_table_size == 0 ||
- sysctl_is_perm_empty_ctl_table(header->ctl_table))
+ sysctl_is_perm_empty_ctl_header(header))
return true;
/* Are there links available for every entry in table? */
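
With the sentinel row gone, the permanently-empty property lives on the ctl_table_header itself: init_header() tags any header registered with the now zero-length sysctl_mount_point table, and insert_header() refuses children beneath it. A hedged sketch of the caller side, with an illustrative path:

static int __init example_sysctl_init(void)
{
	/* "fs/example" is illustrative. Registering any table under a
	 * permanently empty directory later fails with -EINVAL. */
	if (!register_sysctl_mount_point("fs/example"))
		return -ENOMEM;
	return 0;
}
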
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 23fbab954c20..e5a5f015ff03 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -411,14 +411,14 @@ struct mem_size_stats {
};
static void smaps_page_accumulate(struct mem_size_stats *mss,
- struct page *page, unsigned long size, unsigned long pss,
+ struct folio *folio, unsigned long size, unsigned long pss,
bool dirty, bool locked, bool private)
{
mss->pss += pss;
- if (PageAnon(page))
+ if (folio_test_anon(folio))
mss->pss_anon += pss;
- else if (PageSwapBacked(page))
+ else if (folio_test_swapbacked(folio))
mss->pss_shmem += pss;
else
mss->pss_file += pss;
@@ -426,7 +426,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
if (locked)
mss->pss_locked += pss;
- if (dirty || PageDirty(page)) {
+ if (dirty || folio_test_dirty(folio)) {
mss->pss_dirty += pss;
if (private)
mss->private_dirty += size;
@@ -444,6 +444,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
bool compound, bool young, bool dirty, bool locked,
bool migration)
{
+ struct folio *folio = page_folio(page);
int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
@@ -451,27 +452,28 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
* First accumulate quantities that depend only on |size| and the type
* of the compound page.
*/
- if (PageAnon(page)) {
+ if (folio_test_anon(folio)) {
mss->anonymous += size;
- if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
+ if (!folio_test_swapbacked(folio) && !dirty &&
+ !folio_test_dirty(folio))
mss->lazyfree += size;
}
- if (PageKsm(page))
+ if (folio_test_ksm(folio))
mss->ksm += size;
mss->resident += size;
/* Accumulate the size in pages that have been accessed. */
- if (young || page_is_young(page) || PageReferenced(page))
+ if (young || folio_test_young(folio) || folio_test_referenced(folio))
mss->referenced += size;
/*
* Then accumulate quantities that may depend on sharing, or that may
* differ page-by-page.
*
- * page_count(page) == 1 guarantees the page is mapped exactly once.
+ * refcount == 1 guarantees the page is mapped exactly once.
* If any subpage of the compound page is mapped by PTE, it would elevate
- * page_count().
+ * the refcount.
*
* The page_mapcount() is called to get a snapshot of the mapcount.
* Without holding the page lock this snapshot can be slightly wrong as
@@ -480,9 +482,9 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
* especially for migration entries. Treat regular migration entries
* as mapcount == 1.
*/
- if ((page_count(page) == 1) || migration) {
- smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
- locked, true);
+ if ((folio_ref_count(folio) == 1) || migration) {
+ smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
+ dirty, locked, true);
return;
}
for (i = 0; i < nr; i++, page++) {
@@ -490,8 +492,8 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
unsigned long pss = PAGE_SIZE << PSS_SHIFT;
if (mapcount >= 2)
pss /= mapcount;
- smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
- mapcount < 2);
+ smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
+ dirty, locked, mapcount < 2);
}
}
@@ -576,6 +578,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
+ struct folio *folio;
bool migration = false;
if (pmd_present(*pmd)) {
@@ -590,11 +593,12 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
}
if (IS_ERR_OR_NULL(page))
return;
- if (PageAnon(page))
+ folio = page_folio(page);
+ if (folio_test_anon(folio))
mss->anonymous_thp += HPAGE_PMD_SIZE;
- else if (PageSwapBacked(page))
+ else if (folio_test_swapbacked(folio))
mss->shmem_thp += HPAGE_PMD_SIZE;
- else if (is_zone_device_page(page))
+ else if (folio_is_zone_device(folio))
/* pass */;
else
mss->file_thp += HPAGE_PMD_SIZE;
@@ -726,19 +730,20 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
- struct page *page = NULL;
- pte_t ptent = ptep_get(pte);
+ pte_t ptent = huge_ptep_get(pte);
+ struct folio *folio = NULL;
if (pte_present(ptent)) {
- page = vm_normal_page(vma, addr, ptent);
+ folio = page_folio(pte_page(ptent));
} else if (is_swap_pte(ptent)) {
swp_entry_t swpent = pte_to_swp_entry(ptent);
if (is_pfn_swap_entry(swpent))
- page = pfn_swap_entry_to_page(swpent);
+ folio = pfn_swap_entry_folio(swpent);
}
- if (page) {
- if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
+ if (folio) {
+ if (folio_likely_mapped_shared(folio) ||
+ hugetlb_pmd_shared(pte))
mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
else
mss->private_hugetlb += huge_page_size(hstate_vma(vma));
@@ -866,8 +871,8 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %8u\n",
- !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
- true, THP_ORDERS_ALL));
+ !!thp_vma_allowable_orders(vma, vma->vm_flags,
+ TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
@@ -1161,7 +1166,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
- struct page *page;
+ struct folio *folio;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
@@ -1173,12 +1178,12 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
if (!pmd_present(*pmd))
goto out;
- page = pmd_page(*pmd);
+ folio = pmd_folio(*pmd);
/* Clear accessed and referenced bits. */
pmdp_test_and_clear_young(vma, addr, pmd);
- test_and_clear_page_young(page);
- ClearPageReferenced(page);
+ folio_test_clear_young(folio);
+ folio_clear_referenced(folio);
out:
spin_unlock(ptl);
return 0;
@@ -1200,14 +1205,14 @@ out:
if (!pte_present(ptent))
continue;
- page = vm_normal_page(vma, addr, ptent);
- if (!page)
+ folio = vm_normal_folio(vma, addr, ptent);
+ if (!folio)
continue;
/* Clear accessed and referenced bits. */
ptep_test_and_clear_young(vma, addr, pte);
- test_and_clear_page_young(page);
- ClearPageReferenced(page);
+ folio_test_clear_young(folio);
+ folio_clear_referenced(folio);
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
@@ -1574,12 +1579,13 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
pte = huge_ptep_get(ptep);
if (pte_present(pte)) {
- struct page *page = pte_page(pte);
+ struct folio *folio = page_folio(pte_page(pte));
- if (!PageAnon(page))
+ if (!folio_test_anon(folio))
flags |= PM_FILE;
- if (page_mapcount(page) == 1)
+ if (!folio_likely_mapped_shared(folio) &&
+ !hugetlb_pmd_shared(ptep))
flags |= PM_MMAP_EXCLUSIVE;
if (huge_pte_uffd_wp(pte))
@@ -1817,15 +1823,13 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
}
static void make_uffd_wp_pte(struct vm_area_struct *vma,
- unsigned long addr, pte_t *pte)
+ unsigned long addr, pte_t *pte, pte_t ptent)
{
- pte_t ptent = ptep_get(pte);
-
if (pte_present(ptent)) {
pte_t old_pte;
old_pte = ptep_modify_prot_start(vma, addr, pte);
- ptent = pte_mkuffd_wp(ptent);
+ ptent = pte_mkuffd_wp(old_pte);
ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_mkuffd_wp(ptent);
@@ -2175,9 +2179,12 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
/* Fast path for performing exclusive WP */
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
- if (pte_uffd_wp(ptep_get(pte)))
+ pte_t ptent = ptep_get(pte);
+
+ if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
+ pte_swp_uffd_wp_any(ptent))
continue;
- make_uffd_wp_pte(vma, addr, pte);
+ make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = addr + PAGE_SIZE;
@@ -2190,8 +2197,10 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
p->arg.return_mask == PAGE_IS_WRITTEN) {
for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
unsigned long next = addr + PAGE_SIZE;
+ pte_t ptent = ptep_get(pte);
- if (pte_uffd_wp(ptep_get(pte)))
+ if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
+ pte_swp_uffd_wp_any(ptent))
continue;
ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
p, addr, &next);
@@ -2199,7 +2208,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
break;
if (~p->arg.flags & PM_SCAN_WP_MATCHING)
continue;
- make_uffd_wp_pte(vma, addr, pte);
+ make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = next;
@@ -2208,8 +2217,9 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
}
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
+ pte_t ptent = ptep_get(pte);
unsigned long categories = p->cur_vma_category |
- pagemap_page_category(p, vma, addr, ptep_get(pte));
+ pagemap_page_category(p, vma, addr, ptent);
unsigned long next = addr + PAGE_SIZE;
if (!pagemap_scan_is_interesting_page(categories, p))
@@ -2224,7 +2234,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
if (~categories & PAGE_IS_WRITTEN)
continue;
- make_uffd_wp_pte(vma, addr, pte);
+ make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = next;
@@ -2547,28 +2557,29 @@ struct numa_maps_private {
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
unsigned long nr_pages)
{
+ struct folio *folio = page_folio(page);
int count = page_mapcount(page);
md->pages += nr_pages;
- if (pte_dirty || PageDirty(page))
+ if (pte_dirty || folio_test_dirty(folio))
md->dirty += nr_pages;
- if (PageSwapCache(page))
+ if (folio_test_swapcache(folio))
md->swapcache += nr_pages;
- if (PageActive(page) || PageUnevictable(page))
+ if (folio_test_active(folio) || folio_test_unevictable(folio))
md->active += nr_pages;
- if (PageWriteback(page))
+ if (folio_test_writeback(folio))
md->writeback += nr_pages;
- if (PageAnon(page))
+ if (folio_test_anon(folio))
md->anon += nr_pages;
if (count > md->mapcount_max)
md->mapcount_max = count;
- md->node[page_to_nid(page)] += nr_pages;
+ md->node[folio_nid(folio)] += nr_pages;
}
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
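
The PSS arithmetic these hunks carry over is fixed-point: every mapper of a page is charged (PAGE_SIZE << PSS_SHIFT) / mapcount, the shifted sums are accumulated, and the total is shifted back down only when reported, so sub-page fractions are not lost along the way. A standalone illustration (PSS_SHIFT is 12, as in task_mmu.c):

#include <stdio.h>

#define PSS_SHIFT 12
#define PAGE_SIZE 4096UL

int main(void)
{
	/* Three pages mapped by 1, 2 and 3 processes respectively. */
	int mapcount[] = { 1, 2, 3 };
	unsigned long pss = 0;

	for (int i = 0; i < 3; i++)
		pss += (PAGE_SIZE << PSS_SHIFT) / mapcount[i];

	/* Shift back to bytes only at reporting time: 4096+2048+1365. */
	printf("PSS: %lu bytes\n", pss >> PSS_SHIFT);	/* 7509 */
	return 0;
}
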
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 1fb213f379a5..b52d85f8ad59 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -383,6 +383,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
/* leave now if filled buffer already */
if (!iov_iter_count(iter))
return acc;
+
+ cond_resched();
}
list_for_each_entry(m, &vmcore_list, list) {
@@ -1370,9 +1372,8 @@ static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
vdd_hdr->n_type = NT_VMCOREDD;
- strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
- sizeof(vdd_hdr->name));
- memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
+ strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME);
+ strscpy_pad(vdd_hdr->dump_name, data->dump_name);
}
/**
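
The strncpy()-to-strscpy_pad() swap is behavior-preserving for these fixed-size note fields but removes the classic footgun: strncpy() does not NUL-terminate when the source fills the buffer, while strscpy_pad() always terminates and zero-fills the tail, so no stack garbage reaches the ELF note that userspace reads. A sketch of the semantics (the two-argument form infers the size from the array):

	char name[16];
	ssize_t n;

	/* Always NUL-terminated; the remaining bytes are zeroed.
	 * Returns the copied length, or -E2BIG on truncation. */
	n = strscpy_pad(name, "LINUX");
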
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index 405913f4faff..d62fbef838b6 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -19,11 +19,11 @@
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
-#include <linux/parser.h>
#include <linux/seq_file.h>
-#include <linux/mount.h>
#include <linux/crc32.h>
#include <linux/mpage.h>
+#include <linux/fs_parser.h>
+#include <linux/fs_context.h>
#include "qnx6.h"
static const struct super_operations qnx6_sops;
@@ -31,7 +31,7 @@ static const struct super_operations qnx6_sops;
static void qnx6_put_super(struct super_block *sb);
static struct inode *qnx6_alloc_inode(struct super_block *sb);
static void qnx6_free_inode(struct inode *inode);
-static int qnx6_remount(struct super_block *sb, int *flags, char *data);
+static int qnx6_reconfigure(struct fs_context *fc);
static int qnx6_statfs(struct dentry *dentry, struct kstatfs *buf);
static int qnx6_show_options(struct seq_file *seq, struct dentry *root);
@@ -40,7 +40,6 @@ static const struct super_operations qnx6_sops = {
.free_inode = qnx6_free_inode,
.put_super = qnx6_put_super,
.statfs = qnx6_statfs,
- .remount_fs = qnx6_remount,
.show_options = qnx6_show_options,
};
@@ -54,10 +53,12 @@ static int qnx6_show_options(struct seq_file *seq, struct dentry *root)
return 0;
}
-static int qnx6_remount(struct super_block *sb, int *flags, char *data)
+static int qnx6_reconfigure(struct fs_context *fc)
{
+ struct super_block *sb = fc->root->d_sb;
+
sync_filesystem(sb);
- *flags |= SB_RDONLY;
+ fc->sb_flags |= SB_RDONLY;
return 0;
}
@@ -218,39 +219,36 @@ void qnx6_superblock_debug(struct qnx6_super_block *sb, struct super_block *s)
#endif
enum {
- Opt_mmifs,
- Opt_err
+ Opt_mmifs
+};
+
+struct qnx6_context {
+ unsigned long s_mount_opts;
};
-static const match_table_t tokens = {
- {Opt_mmifs, "mmi_fs"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec qnx6_param_spec[] = {
+ fsparam_flag ("mmi_fs", Opt_mmifs),
+ {}
};
-static int qnx6_parse_options(char *options, struct super_block *sb)
+static int qnx6_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- char *p;
- struct qnx6_sb_info *sbi = QNX6_SB(sb);
- substring_t args[MAX_OPT_ARGS];
-
- if (!options)
- return 1;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_mmifs:
- set_opt(sbi->s_mount_opt, MMI_FS);
- break;
- default:
- return 0;
- }
+ struct qnx6_context *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
+
+ opt = fs_parse(fc, qnx6_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_mmifs:
+ ctx->s_mount_opts |= QNX6_MOUNT_MMI_FS;
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static struct buffer_head *qnx6_check_first_superblock(struct super_block *s,
@@ -293,22 +291,25 @@ static struct buffer_head *qnx6_check_first_superblock(struct super_block *s,
static struct inode *qnx6_private_inode(struct super_block *s,
struct qnx6_root_node *p);
-static int qnx6_fill_super(struct super_block *s, void *data, int silent)
+static int qnx6_fill_super(struct super_block *s, struct fs_context *fc)
{
struct buffer_head *bh1 = NULL, *bh2 = NULL;
struct qnx6_super_block *sb1 = NULL, *sb2 = NULL;
struct qnx6_sb_info *sbi;
+ struct qnx6_context *ctx = fc->fs_private;
struct inode *root;
const char *errmsg;
struct qnx6_sb_info *qs;
int ret = -EINVAL;
u64 offset;
int bootblock_offset = QNX6_BOOTBLOCK_SIZE;
+ int silent = fc->sb_flags & SB_SILENT;
qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL);
if (!qs)
return -ENOMEM;
s->s_fs_info = qs;
+ qs->s_mount_opt = ctx->s_mount_opts;
/* Superblock always is 512 Byte long */
if (!sb_set_blocksize(s, QNX6_SUPERBLOCK_SIZE)) {
@@ -316,12 +317,7 @@ static int qnx6_fill_super(struct super_block *s, void *data, int silent)
goto outnobh;
}
- /* parse the mount-options */
- if (!qnx6_parse_options((char *) data, s)) {
- pr_err("invalid mount options.\n");
- goto outnobh;
- }
- if (test_opt(s, MMI_FS)) {
+ if (qs->s_mount_opt == QNX6_MOUNT_MMI_FS) {
sb1 = qnx6_mmi_fill_super(s, silent);
if (sb1)
goto mmi_success;
@@ -632,18 +628,43 @@ static void destroy_inodecache(void)
kmem_cache_destroy(qnx6_inode_cachep);
}
-static struct dentry *qnx6_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int qnx6_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, qnx6_fill_super);
+}
+
+static void qnx6_free_fc(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, qnx6_fill_super);
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations qnx6_context_ops = {
+ .parse_param = qnx6_parse_param,
+ .get_tree = qnx6_get_tree,
+ .reconfigure = qnx6_reconfigure,
+ .free = qnx6_free_fc,
+};
+
+static int qnx6_init_fs_context(struct fs_context *fc)
+{
+ struct qnx6_context *ctx;
+
+ ctx = kzalloc(sizeof(struct qnx6_context), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ fc->ops = &qnx6_context_ops;
+ fc->fs_private = ctx;
+
+ return 0;
}
static struct file_system_type qnx6_fs_type = {
- .owner = THIS_MODULE,
- .name = "qnx6",
- .mount = qnx6_mount,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
+ .owner = THIS_MODULE,
+ .name = "qnx6",
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = qnx6_init_fs_context,
+ .parameters = qnx6_param_spec,
};
MODULE_ALIAS_FS("qnx6");
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c
index c7a1aa3c882b..b45c7edc3225 100644
--- a/fs/ramfs/file-mmu.c
+++ b/fs/ramfs/file-mmu.c
@@ -35,7 +35,7 @@ static unsigned long ramfs_mmu_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+ return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}
const struct file_operations ramfs_file_operations = {
diff --git a/fs/read_write.c b/fs/read_write.c
index d4c036e82b6c..2115d1f40bd5 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1685,7 +1685,7 @@ int generic_write_checks_count(struct kiocb *iocb, loff_t *count)
if ((iocb->ki_flags & IOCB_NOWAIT) &&
!((iocb->ki_flags & IOCB_DIRECT) ||
- (file->f_mode & FMODE_BUF_WASYNC)))
+ (file->f_op->fop_flags & FOP_BUFFER_WASYNC)))
return -EINVAL;
return generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, count);
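
This moves the capability from a per-open f_mode bit to a static per-driver flag: a filesystem that can complete buffered writes without blocking advertises it once in its file_operations, and generic_write_checks_count() then lets RWF_NOWAIT through. A hedged sketch of an ops table using it (names are illustrative):

static const struct file_operations example_file_ops = {
	.read_iter	= generic_file_read_iter,
	.write_iter	= example_write_iter,
	/* Buffered writes honor IOCB_NOWAIT, so RWF_NOWAIT is accepted. */
	.fop_flags	= FOP_BUFFER_WASYNC,
};
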
diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
index 3a5a752d96c7..5011c10287c6 100644
--- a/fs/reiserfs/item_ops.c
+++ b/fs/reiserfs/item_ops.c
@@ -389,16 +389,9 @@ static void direntry_print_item(struct item_head *ih, char *item)
name = item + deh_location(deh);
if (name[namelen - 1] == 0)
namelen = strlen(name);
- namebuf[0] = '"';
- if (namelen > sizeof(namebuf) - 3) {
- strncpy(namebuf + 1, name, sizeof(namebuf) - 3);
- namebuf[sizeof(namebuf) - 2] = '"';
- namebuf[sizeof(namebuf) - 1] = 0;
- } else {
- memcpy(namebuf + 1, name, namelen);
- namebuf[namelen + 1] = '"';
- namebuf[namelen + 2] = 0;
- }
+
+ scnprintf(namebuf, sizeof(namebuf), "\"%.*s\"",
+ (int)sizeof(namebuf)-3, name);
printk("%d: %-15s%-15d%-15d%-15lld%-15lld(%s)\n",
i, namebuf,
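
The scnprintf() form collapses both branches of the old copy logic: the "%.*s" precision caps the copied name at sizeof(namebuf) minus the two quotes and the NUL, and quoting plus termination come from the format string itself. The same precision trick, runnable in plain C with snprintf() (scnprintf() is the kernel's bounded variant that returns the count actually written):

#include <stdio.h>

int main(void)
{
	char namebuf[8];
	const char *name = "averylongdirentryname";

	/* Keep room for two quotes and the terminating NUL. */
	snprintf(namebuf, sizeof(namebuf), "\"%.*s\"",
		 (int)sizeof(namebuf) - 3, name);
	puts(namebuf);	/* prints "avery" in quotes */
	return 0;
}
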
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 6474529c4253..e539ccd39e1e 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2589,7 +2589,7 @@ static void journal_list_init(struct super_block *sb)
static void release_journal_dev(struct reiserfs_journal *journal)
{
if (journal->j_bdev_file) {
- fput(journal->j_bdev_file);
+ bdev_fput(journal->j_bdev_file);
journal->j_bdev_file = NULL;
}
}
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 2be227532f39..2cbb92462074 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -594,7 +594,7 @@ static void romfs_kill_sb(struct super_block *sb)
#ifdef CONFIG_ROMFS_ON_BLOCK
if (sb->s_bdev) {
sync_blockdev(sb->s_bdev);
- fput(sb->s_bdev_file);
+ bdev_fput(sb->s_bdev_file);
}
#endif
}
diff --git a/fs/seq_file.c b/fs/seq_file.c
index f5fdaf3b1572..e676c8b0cf5d 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -669,18 +669,11 @@ void seq_putc(struct seq_file *m, char c)
}
EXPORT_SYMBOL(seq_putc);
-void seq_puts(struct seq_file *m, const char *s)
+void __seq_puts(struct seq_file *m, const char *s)
{
- int len = strlen(s);
-
- if (m->count + len >= m->size) {
- seq_set_overflow(m);
- return;
- }
- memcpy(m->buf + m->count, s, len);
- m->count += len;
+ seq_write(m, s, strlen(s));
}
-EXPORT_SYMBOL(seq_puts);
+EXPORT_SYMBOL(__seq_puts);
/**
* seq_put_decimal_ull_width - A helper routine for putting decimal numbers
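
Exporting __seq_puts() only pays off with a companion seq_file.h change that is not part of this hunk; presumably seq_puts() becomes a wrapper that resolves strlen() at compile time for string literals. A sketch of that shape, stated as an assumption rather than quoted from the patch:

/* Assumed header-side counterpart; not taken from this hunk. */
static inline void seq_puts(struct seq_file *m, const char *s)
{
	if (!__builtin_constant_p(*s))
		__seq_puts(m, s);			/* runtime strlen() */
	else if (s[0] && !s[1])
		seq_putc(m, s[0]);			/* single character */
	else
		seq_write(m, s, __builtin_strlen(s));	/* length folded at compile time */
}
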
diff --git a/fs/signalfd.c b/fs/signalfd.c
index e20d1484c663..4a5614442dbf 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -68,8 +68,7 @@ static __poll_t signalfd_poll(struct file *file, poll_table *wait)
/*
* Copied from copy_siginfo_to_user() in kernel/signal.c
*/
-static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
- kernel_siginfo_t const *kinfo)
+static int signalfd_copyinfo(struct iov_iter *to, kernel_siginfo_t const *kinfo)
{
struct signalfd_siginfo new;
@@ -146,10 +145,10 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
break;
}
- if (copy_to_user(uinfo, &new, sizeof(struct signalfd_siginfo)))
+ if (!copy_to_iter_full(&new, sizeof(struct signalfd_siginfo), to))
return -EFAULT;
- return sizeof(*uinfo);
+ return sizeof(struct signalfd_siginfo);
}
static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
@@ -199,28 +198,27 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
* error code. The "count" parameter must be at least the size of a
* "struct signalfd_siginfo".
*/
-static ssize_t signalfd_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t signalfd_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct signalfd_ctx *ctx = file->private_data;
- struct signalfd_siginfo __user *siginfo;
- int nonblock = file->f_flags & O_NONBLOCK;
+ size_t count = iov_iter_count(to);
ssize_t ret, total = 0;
kernel_siginfo_t info;
+ bool nonblock;
count /= sizeof(struct signalfd_siginfo);
if (!count)
return -EINVAL;
- siginfo = (struct signalfd_siginfo __user *) buf;
+ nonblock = file->f_flags & O_NONBLOCK || iocb->ki_flags & IOCB_NOWAIT;
do {
ret = signalfd_dequeue(ctx, &info, nonblock);
if (unlikely(ret <= 0))
break;
- ret = signalfd_copyinfo(siginfo, &info);
+ ret = signalfd_copyinfo(to, &info);
if (ret < 0)
break;
- siginfo++;
total += ret;
nonblock = 1;
} while (--count);
@@ -246,7 +244,7 @@ static const struct file_operations signalfd_fops = {
#endif
.release = signalfd_release,
.poll = signalfd_poll,
- .read = signalfd_read,
+ .read_iter = signalfd_read_iter,
.llseek = noop_llseek,
};
@@ -265,20 +263,34 @@ static int do_signalfd4(int ufd, sigset_t *mask, int flags)
signotset(mask);
if (ufd == -1) {
+ struct file *file;
+
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->sigmask = *mask;
+ ufd = get_unused_fd_flags(flags & O_CLOEXEC);
+ if (ufd < 0) {
+ kfree(ctx);
+ return ufd;
+ }
+
+ file = anon_inode_getfile("[signalfd]", &signalfd_fops, ctx,
+ O_RDWR | (flags & O_NONBLOCK));
+ if (IS_ERR(file)) {
+ put_unused_fd(ufd);
+ kfree(ctx);
+ return PTR_ERR(file);
+ }
+ file->f_mode |= FMODE_NOWAIT;
+
/*
* When we call this, the initialization must be complete, since
* anon_inode_getfd() will install the fd.
*/
- ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx,
- O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK)));
- if (ufd < 0)
- kfree(ctx);
+ fd_install(ufd, file);
} else {
struct fd f = fdget(ufd);
if (!f.file)
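
The anon_inode_getfd() one-shot is replaced by the standard three-step sequence, needed whenever the file wants post-creation setup (here FMODE_NOWAIT) before it becomes reachable through the fd table. The general shape, with illustrative names:

static int example_create_fd(void *priv)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);	/* 1. reserve a slot */
	if (fd < 0)
		return fd;
	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);		/* never published, plain put */
		return PTR_ERR(file);
	}
	file->f_mode |= FMODE_NOWAIT;		/* 2. finish setup */
	fd_install(fd, file);			/* 3. publish; no failure past here */
	return fd;
}
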
diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig
index 2927bd174a88..2517dc242386 100644
--- a/fs/smb/client/Kconfig
+++ b/fs/smb/client/Kconfig
@@ -2,6 +2,7 @@
config CIFS
tristate "SMB3 and CIFS support (advanced network filesystem)"
depends on INET
+ select NETFS_SUPPORT
select NLS
select NLS_UCS2_UTILS
select CRYPTO
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index a0017724d523..0ff2491c311d 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -417,6 +417,7 @@ smb2_close_cached_fid(struct kref *ref)
{
struct cached_fid *cfid = container_of(ref, struct cached_fid,
refcount);
+ int rc;
spin_lock(&cfid->cfids->cfid_list_lock);
if (cfid->on_list) {
@@ -430,9 +431,10 @@ smb2_close_cached_fid(struct kref *ref)
cfid->dentry = NULL;
if (cfid->is_open) {
- SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
+ rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
cfid->fid.volatile_fid);
- atomic_dec(&cfid->tcon->num_remote_opens);
+ if (rc) /* should we retry on -EBUSY or -EAGAIN? */
+ cifs_dbg(VFS, "close cached dir rc %d\n", rc);
}
free_cached_dir(cfid);
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 226d4835c92d..c71ae5c04306 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -250,6 +250,8 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
list_for_each_entry(cfile, &tcon->openFileList, tlist) {
@@ -676,6 +678,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
}
#endif /* CONFIG_CIFS_STATS2 */
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
atomic_set(&tcon->num_smbs_sent, 0);
spin_lock(&tcon->stat_lock);
@@ -755,6 +759,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
}
#endif /* STATS2 */
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
i++;
seq_printf(m, "\n%d) %s", i, tcon->tree_name);
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index aa6f1ecb7c0e..ec5b639f421a 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -156,6 +156,7 @@ struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
+struct workqueue_struct *serverclose_wq;
__u32 cifs_lock_secret;
/*
@@ -370,9 +371,13 @@ static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
+static struct kmem_cache *cifs_io_request_cachep;
+static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
+mempool_t cifs_io_request_pool;
+mempool_t cifs_io_subrequest_pool;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
@@ -388,6 +393,7 @@ cifs_alloc_inode(struct super_block *sb)
* server, can not assume caching of file data or metadata.
*/
cifs_set_oplock_level(cifs_inode, 0);
+ cifs_inode->lease_granted = false;
cifs_inode->flags = 0;
spin_lock_init(&cifs_inode->writers_lock);
cifs_inode->writers = 0;
@@ -738,6 +744,8 @@ static void cifs_umount_begin(struct super_block *sb)
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->tc_lock);
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_umount);
if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
/* we have other mounts to same share or we have
already tried to umount this and woken up
@@ -982,61 +990,6 @@ out:
return root;
}
-
-static ssize_t
-cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
-{
- ssize_t rc;
- struct inode *inode = file_inode(iocb->ki_filp);
-
- if (iocb->ki_flags & IOCB_DIRECT)
- return cifs_user_readv(iocb, iter);
-
- rc = cifs_revalidate_mapping(inode);
- if (rc)
- return rc;
-
- return generic_file_read_iter(iocb, iter);
-}
-
-static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct cifsInodeInfo *cinode = CIFS_I(inode);
- ssize_t written;
- int rc;
-
- if (iocb->ki_filp->f_flags & O_DIRECT) {
- written = cifs_user_writev(iocb, from);
- if (written > 0 && CIFS_CACHE_READ(cinode)) {
- cifs_zap_mapping(inode);
- cifs_dbg(FYI,
- "Set no oplock for inode=%p after a write operation\n",
- inode);
- cinode->oplock = 0;
- }
- return written;
- }
-
- written = cifs_get_writer(cinode);
- if (written)
- return written;
-
- written = generic_file_write_iter(iocb, from);
-
- if (CIFS_CACHE_WRITE(CIFS_I(inode)))
- goto out;
-
- rc = filemap_fdatawrite(inode->i_mapping);
- if (rc)
- cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
- rc, inode);
-
-out:
- cifs_put_writer(cinode);
- return written;
-}
-
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
struct cifsFileInfo *cfile = file->private_data;
@@ -1338,6 +1291,8 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
if (rc)
goto unlock;
+ if (fend > target_cifsi->netfs.zero_point)
+ target_cifsi->netfs.zero_point = fend + 1;
/* Discard all the folios that overlap the destination region. */
cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
@@ -1356,6 +1311,8 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
fscache_resize_cookie(cifs_inode_cookie(target_inode),
new_size);
}
+ if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
+ target_cifsi->netfs.zero_point = new_size;
}
/* force revalidate of size and timestamps of target file now
@@ -1447,6 +1404,8 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
if (rc)
goto unlock;
+ if (fend > target_cifsi->netfs.zero_point)
+ target_cifsi->netfs.zero_point = fend + 1;
/* Discard all the folios that overlap the destination region. */
truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
@@ -1563,8 +1522,8 @@ const struct file_operations cifs_file_strict_ops = {
};
const struct file_operations cifs_file_direct_ops = {
- .read_iter = cifs_direct_readv,
- .write_iter = cifs_direct_writev,
+ .read_iter = netfs_unbuffered_read_iter,
+ .write_iter = netfs_file_write_iter,
.open = cifs_open,
.release = cifs_close,
.lock = cifs_lock,
@@ -1619,8 +1578,8 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
};
const struct file_operations cifs_file_direct_nobrl_ops = {
- .read_iter = cifs_direct_readv,
- .write_iter = cifs_direct_writev,
+ .read_iter = netfs_unbuffered_read_iter,
+ .write_iter = netfs_file_write_iter,
.open = cifs_open,
.release = cifs_close,
.fsync = cifs_fsync,
@@ -1795,6 +1754,48 @@ static void destroy_mids(void)
kmem_cache_destroy(cifs_mid_cachep);
}
+static int cifs_init_netfs(void)
+{
+ cifs_io_request_cachep =
+ kmem_cache_create("cifs_io_request",
+ sizeof(struct cifs_io_request), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!cifs_io_request_cachep)
+ goto nomem_req;
+
+ if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
+ goto nomem_reqpool;
+
+ cifs_io_subrequest_cachep =
+ kmem_cache_create("cifs_io_subrequest",
+ sizeof(struct cifs_io_subrequest), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!cifs_io_subrequest_cachep)
+ goto nomem_subreq;
+
+ if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
+ goto nomem_subreqpool;
+
+ return 0;
+
+nomem_subreqpool:
+ kmem_cache_destroy(cifs_io_subrequest_cachep);
+nomem_subreq:
+ mempool_destroy(&cifs_io_request_pool);
+nomem_reqpool:
+ kmem_cache_destroy(cifs_io_request_cachep);
+nomem_req:
+ return -ENOMEM;
+}
+
+static void cifs_destroy_netfs(void)
+{
+ mempool_exit(&cifs_io_subrequest_pool);
+ kmem_cache_destroy(cifs_io_subrequest_cachep);
+ mempool_exit(&cifs_io_request_pool);
+ kmem_cache_destroy(cifs_io_request_cachep);
+}
+
static int __init
init_cifs(void)
{
@@ -1888,14 +1889,25 @@ init_cifs(void)
goto out_destroy_cifsoplockd_wq;
}
+ serverclose_wq = alloc_workqueue("serverclose",
+ WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+ if (!serverclose_wq) {
+ rc = -ENOMEM;
+ goto out_destroy_serverclose_wq;
+ }
+
rc = cifs_init_inodecache();
if (rc)
goto out_destroy_deferredclose_wq;
- rc = init_mids();
+ rc = cifs_init_netfs();
if (rc)
goto out_destroy_inodecache;
+ rc = init_mids();
+ if (rc)
+ goto out_destroy_netfs;
+
rc = cifs_init_request_bufs();
if (rc)
goto out_destroy_mids;
@@ -1950,6 +1962,8 @@ out_destroy_request_bufs:
cifs_destroy_request_bufs();
out_destroy_mids:
destroy_mids();
+out_destroy_netfs:
+ cifs_destroy_netfs();
out_destroy_inodecache:
cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
@@ -1962,6 +1976,8 @@ out_destroy_decrypt_wq:
destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
destroy_workqueue(cifsiod_wq);
+out_destroy_serverclose_wq:
+ destroy_workqueue(serverclose_wq);
out_clean_proc:
cifs_proc_clean();
return rc;
@@ -1986,11 +2002,13 @@ exit_cifs(void)
#endif
cifs_destroy_request_bufs();
destroy_mids();
+ cifs_destroy_netfs();
cifs_destroy_inodecache();
destroy_workqueue(deferredclose_wq);
destroy_workqueue(cifsoplockd_wq);
destroy_workqueue(decrypt_wq);
destroy_workqueue(fileinfo_put_wq);
+ destroy_workqueue(serverclose_wq);
destroy_workqueue(cifsiod_wq);
cifs_proc_clean();
}
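
cifs_init_netfs() backs each kmem_cache with an embedded mempool_t holding 100 preallocated objects, so I/O request allocation degrades to waiting rather than failing under memory pressure. A hedged usage sketch on the consumer side (the helper name is illustrative):

static struct cifs_io_request *example_alloc_io_request(void)
{
	/* With a sleeping gfp mask, mempool_alloc() waits for one of the
	 * preallocated objects instead of returning NULL. */
	return mempool_alloc(&cifs_io_request_pool, GFP_NOFS);
}
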
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index ca55d01117c8..87310f05d397 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -69,7 +69,6 @@ extern int cifs_revalidate_file_attr(struct file *filp);
extern int cifs_revalidate_dentry_attr(struct dentry *);
extern int cifs_revalidate_file(struct file *filp);
extern int cifs_revalidate_dentry(struct dentry *);
-extern int cifs_invalidate_mapping(struct inode *inode);
extern int cifs_revalidate_mapping(struct inode *inode);
extern int cifs_zap_mapping(struct inode *inode);
extern int cifs_getattr(struct mnt_idmap *, const struct path *,
@@ -85,6 +84,7 @@ extern const struct inode_operations cifs_namespace_inode_operations;
/* Functions related to files and directories */
+extern const struct netfs_request_ops cifs_req_ops;
extern const struct file_operations cifs_file_ops;
extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
@@ -94,12 +94,10 @@ extern const struct file_operations cifs_file_strict_nobrl_ops;
extern int cifs_open(struct inode *inode, struct file *file);
extern int cifs_close(struct inode *inode, struct file *file);
extern int cifs_closedir(struct inode *inode, struct file *file);
-extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to);
extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
-extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from);
extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
+ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter);
extern int cifs_flock(struct file *pfile, int cmd, struct file_lock *plock);
extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, loff_t, loff_t, int);
@@ -110,9 +108,6 @@ extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
extern const struct file_operations cifs_dir_ops;
extern int cifs_dir_open(struct inode *inode, struct file *file);
extern int cifs_readdir(struct file *file, struct dir_context *ctx);
-extern void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len);
-extern void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len);
-extern void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len);
/* Functions related to dir entries */
extern const struct dentry_operations cifs_dentry_ops;
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 7ed9d05f6890..73482734a8d8 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -268,8 +268,7 @@ struct dfs_info3_param;
struct cifs_fattr;
struct smb3_fs_context;
struct cifs_fid;
-struct cifs_readdata;
-struct cifs_writedata;
+struct cifs_io_subrequest;
struct cifs_io_parms;
struct cifs_search_info;
struct cifsInodeInfo;
@@ -442,18 +441,17 @@ struct smb_version_operations {
/* set fid protocol-specific info */
void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32);
/* close a file */
- void (*close)(const unsigned int, struct cifs_tcon *,
+ int (*close)(const unsigned int, struct cifs_tcon *,
struct cifs_fid *);
/* close a file, returning file attributes and timestamps */
- void (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
+ int (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *pfile_info);
/* send a flush request to the server */
int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
/* async read from the server */
- int (*async_readv)(struct cifs_readdata *);
+ int (*async_readv)(struct cifs_io_subrequest *);
/* async write to the server */
- int (*async_writev)(struct cifs_writedata *,
- void (*release)(struct kref *));
+ void (*async_writev)(struct cifs_io_subrequest *);
/* sync read from the server */
int (*sync_read)(const unsigned int, struct cifs_fid *,
struct cifs_io_parms *, unsigned int *, char **,
@@ -548,8 +546,8 @@ struct smb_version_operations {
/* writepages retry size */
unsigned int (*wp_retry_size)(struct inode *);
/* get mtu credits */
- int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int,
- unsigned int *, struct cifs_credits *);
+ int (*wait_mtu_credits)(struct TCP_Server_Info *, size_t,
+ size_t *, struct cifs_credits *);
/* adjust previously taken mtu credits to request size */
int (*adjust_credits)(struct TCP_Server_Info *server,
struct cifs_credits *credits,
@@ -883,11 +881,12 @@ add_credits(struct TCP_Server_Info *server, const struct cifs_credits *credits,
static inline void
add_credits_and_wake_if(struct TCP_Server_Info *server,
- const struct cifs_credits *credits, const int optype)
+ struct cifs_credits *credits, const int optype)
{
if (credits->value) {
server->ops->add_credits(server, credits, optype);
wake_up(&server->request_q);
+ credits->value = 0;
}
}
@@ -1077,6 +1076,7 @@ struct cifs_ses {
and after mount option parsing we fill it */
char *domainName;
char *password;
+ char *password2; /* When key rotation is used, the new password may be set before it expires */
char workstation_name[CIFS_MAX_WORKSTATION_LEN];
struct session_key auth_key;
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
@@ -1189,6 +1189,7 @@ struct cifs_fattr {
*/
struct cifs_tcon {
struct list_head tcon_list;
+ int debug_id; /* Debugging for tracing */
int tc_count;
struct list_head rlist; /* reconnect list */
spinlock_t tc_lock; /* protect anything here that is not protected */
@@ -1275,13 +1276,14 @@ struct cifs_tcon {
__u32 max_cached_dirs;
#ifdef CONFIG_CIFS_FSCACHE
u64 resource_id; /* server resource id */
+ bool fscache_acquired; /* T if we've tried acquiring a cookie */
struct fscache_volume *fscache; /* cookie for share */
+ struct mutex fscache_lock; /* Prevent regetting a cookie */
#endif
struct list_head pending_opens; /* list of incomplete opens */
struct cached_fids *cfids;
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
- struct list_head dfs_ses_list;
struct delayed_work dfs_cache_work;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
@@ -1440,6 +1442,7 @@ struct cifsFileInfo {
bool swapfile:1;
bool oplock_break_cancelled:1;
bool status_file_deleted:1; /* file has been deleted */
+ bool offload:1; /* offload final part of _put to a wq */
unsigned int oplock_epoch; /* epoch from the lease break */
__u32 oplock_level; /* oplock/lease level from the lease break */
int count;
@@ -1448,6 +1451,7 @@ struct cifsFileInfo {
struct cifs_search_info srch_inf;
struct work_struct oplock_break; /* work for oplock breaks */
struct work_struct put; /* work for the final part of _put */
+ struct work_struct serverclose; /* work for serverclose */
struct delayed_work deferred;
bool deferred_close_scheduled; /* Flag to indicate close is scheduled */
char *symlink_target;
@@ -1487,50 +1491,30 @@ struct cifs_aio_ctx {
bool direct_io;
};
-/* asynchronous read support */
-struct cifs_readdata {
- struct kref refcount;
- struct list_head list;
- struct completion done;
+struct cifs_io_request {
+ struct netfs_io_request rreq;
struct cifsFileInfo *cfile;
- struct address_space *mapping;
- struct cifs_aio_ctx *ctx;
- __u64 offset;
- ssize_t got_bytes;
- unsigned int bytes;
- pid_t pid;
- int result;
- struct work_struct work;
- struct iov_iter iter;
- struct kvec iov[2];
- struct TCP_Server_Info *server;
-#ifdef CONFIG_CIFS_SMB_DIRECT
- struct smbd_mr *mr;
-#endif
- struct cifs_credits credits;
};
-/* asynchronous write support */
-struct cifs_writedata {
- struct kref refcount;
- struct list_head list;
- struct completion done;
- enum writeback_sync_modes sync_mode;
- struct work_struct work;
- struct cifsFileInfo *cfile;
- struct cifs_aio_ctx *ctx;
- struct iov_iter iter;
- struct bio_vec *bv;
- __u64 offset;
+/* asynchronous read support */
+struct cifs_io_subrequest {
+ union {
+ struct netfs_io_subrequest subreq;
+ struct netfs_io_request *rreq;
+ struct cifs_io_request *req;
+ };
+ ssize_t got_bytes;
pid_t pid;
- unsigned int bytes;
+ unsigned int xid;
int result;
+ bool have_xid;
+ bool replay;
+ struct kvec iov[2];
struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT
struct smbd_mr *mr;
#endif
struct cifs_credits credits;
- bool replay;
};
/*
@@ -1804,7 +1788,6 @@ struct cifs_mount_ctx {
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct list_head dfs_ses_list;
};
static inline void __free_dfs_info_param(struct dfs_info3_param *param)
@@ -2012,6 +1995,7 @@ require use of the stronger protocol */
* ->chans_need_reconnect
* ->chans_in_reconnect
* cifs_tcon->tc_lock (anything that is not protected by another lock and can change)
+ * inode->i_rwsem, taken e.g. by fs/netfs/locking.c, should be taken before cifsInodeInfo locks
* cifsInodeInfo->open_file_lock cifsInodeInfo->openFileList cifs_alloc_inode
* cifsInodeInfo->writers_lock cifsInodeInfo->writers cifsInodeInfo_alloc
* cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
@@ -2105,11 +2089,14 @@ extern struct workqueue_struct *decrypt_wq;
extern struct workqueue_struct *fileinfo_put_wq;
extern struct workqueue_struct *cifsoplockd_wq;
extern struct workqueue_struct *deferredclose_wq;
+extern struct workqueue_struct *serverclose_wq;
extern __u32 cifs_lock_secret;
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
+extern mempool_t cifs_io_request_pool;
+extern mempool_t cifs_io_subrequest_pool;
/* Operations for different SMB versions */
#define SMB1_VERSION_STRING "1.0"
@@ -2324,4 +2311,14 @@ struct smb2_compound_vars {
struct kvec ea_iov;
};
+static inline bool cifs_ses_exiting(struct cifs_ses *ses)
+{
+ bool ret;
+
+ spin_lock(&ses->ses_lock);
+ ret = ses->ses_status == SES_EXITING;
+ spin_unlock(&ses->ses_lock);
+ return ret;
+}
+
#endif /* _CIFS_GLOB_H */
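
The add_credits_and_wake_if() change earlier in this file is a use-after-return guard: zeroing credits->value once the credits have been handed back makes the helper idempotent, so overlapping error paths cannot return the same credits twice. A sketch of the now-safe calling pattern:

	struct cifs_credits credits = { .value = 1, .instance = 0 };

	add_credits_and_wake_if(server, &credits, 0);	/* returns the credit */
	add_credits_and_wake_if(server, &credits, 0);	/* value is now 0: no-op */
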
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index c0513fbb8a59..c46d418c1c0c 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -882,7 +882,7 @@ typedef struct smb_com_open_rsp {
__u8 OplockLevel;
__u16 Fid;
__le32 CreateAction;
- struct_group(common_attributes,
+ struct_group_attr(common_attributes, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
@@ -2266,7 +2266,7 @@ typedef struct {
/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
/******************************************************************************/
typedef struct { /* data block encoding of response to level 263 QPathInfo */
- struct_group(common_attributes,
+ struct_group_attr(common_attributes, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
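
struct_group() expands to an anonymous struct, and inside these __packed wire-format typedefs that inner struct must be packed as well, or it can pick up its own alignment and padding, making sizeof() of the group disagree with the raw byte layout (which FORTIFY-checked memcpy() of the group then flags). An illustrative shape, not taken from the patch:

typedef struct {
	__u8 type;
	/* Group the timestamps so they can be memcpy()'d as one unit;
	 * __packed keeps the group's size equal to the sum of its members. */
	struct_group_attr(common_attributes, __packed,
		__le64 CreationTime;
		__le64 LastAccessTime;
	);
} __attribute__((packed)) example_rsp;
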
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 0723e1b57256..c15bb5ee7eb7 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -121,7 +121,7 @@ extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server,
- unsigned int size, unsigned int *num,
+ size_t size, size_t *num,
struct cifs_credits *credits);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
@@ -148,6 +148,8 @@ extern bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 eof,
bool from_readdir);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written);
+void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
+ bool was_async);
extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
int flags,
@@ -303,7 +305,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
struct TCP_Server_Info *primary_server);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
-extern void cifs_put_tcon(struct cifs_tcon *tcon);
+extern void cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace);
extern void cifs_release_automount_timer(void);
@@ -530,8 +532,9 @@ extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
extern struct cifs_ses *sesInfoAlloc(void);
extern void sesInfoFree(struct cifs_ses *);
-extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled);
-extern void tconInfoFree(struct cifs_tcon *);
+extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled,
+ enum smb3_tcon_ref_trace trace);
+extern void tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace);
extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number);
@@ -598,15 +601,11 @@ void __cifs_put_smb_ses(struct cifs_ses *ses);
extern struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx);
-void cifs_readdata_release(struct kref *refcount);
-int cifs_async_readv(struct cifs_readdata *rdata);
+int cifs_async_readv(struct cifs_io_subrequest *rdata);
int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
-int cifs_async_writev(struct cifs_writedata *wdata,
- void (*release)(struct kref *kref));
+void cifs_async_writev(struct cifs_io_subrequest *wdata);
void cifs_writev_complete(struct work_struct *work);
-struct cifs_writedata *cifs_writedata_alloc(work_func_t complete);
-void cifs_writedata_release(struct kref *refcount);
int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
@@ -721,35 +720,33 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
return options;
}
-struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
-void cifs_put_tcon_super(struct super_block *sb);
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
-/* Put references of @ses and @ses->dfs_root_ses */
+/* Put references of @ses and its children */
static inline void cifs_put_smb_ses(struct cifs_ses *ses)
{
- struct cifs_ses *rses = ses->dfs_root_ses;
+ struct cifs_ses *next;
- __cifs_put_smb_ses(ses);
- if (rses)
- __cifs_put_smb_ses(rses);
+ do {
+ next = ses->dfs_root_ses;
+ __cifs_put_smb_ses(ses);
+ } while ((ses = next));
}
-/* Get an active reference of @ses and @ses->dfs_root_ses.
+/* Get an active reference of @ses and its children.
*
* NOTE: make sure to call this function when incrementing reference count of
* @ses to ensure that any DFS root session attached to it (@ses->dfs_root_ses)
* will also get its reference count incremented.
*
- * cifs_put_smb_ses() will put both references, so call it when you're done.
+ * cifs_put_smb_ses() will put all references, so call it when you're done.
*/
static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
{
lockdep_assert_held(&cifs_tcp_ses_lock);
- ses->ses_count++;
- if (ses->dfs_root_ses)
- ses->dfs_root_ses->ses_count++;
+ for (; ses; ses = ses->dfs_root_ses)
+ ses->ses_count++;
}
static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 5aee55551573..25e9ab947c17 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -24,6 +24,8 @@
#include <linux/swap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
+#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#include "cifspdu.h"
#include "cifsfs.h"
#include "cifsglob.h"
@@ -1262,18 +1264,17 @@ openRetry:
static void
cifs_readv_callback(struct mid_q_entry *mid)
{
- struct cifs_readdata *rdata = mid->callback_data;
- struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+ struct cifs_io_subrequest *rdata = mid->callback_data;
+ struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
- .rq_iter_size = iov_iter_count(&rdata->iter),
- .rq_iter = rdata->iter };
+ .rq_iter = rdata->subreq.io_iter };
struct cifs_credits credits = { .value = 1, .instance = 0 };
- cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
+ cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
__func__, mid->mid, mid->mid_state, rdata->result,
- rdata->bytes);
+ rdata->subreq.len);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
@@ -1305,30 +1306,36 @@ cifs_readv_callback(struct mid_q_entry *mid)
rdata->result = -EIO;
}
- queue_work(cifsiod_wq, &rdata->work);
+ if (rdata->result == 0 || rdata->result == -EAGAIN)
+ iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes);
+ rdata->credits.value = 0;
+ netfs_subreq_terminated(&rdata->subreq,
+ (rdata->result == 0 || rdata->result == -EAGAIN) ?
+ rdata->got_bytes : rdata->result,
+ false);
release_mid(mid);
add_credits(server, &credits, 0);
}
/* cifs_async_readv - send an async read, and set up mid to handle result */
int
-cifs_async_readv(struct cifs_readdata *rdata)
+cifs_async_readv(struct cifs_io_subrequest *rdata)
{
int rc;
READ_REQ *smb = NULL;
int wct;
- struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2 };
- cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
- __func__, rdata->offset, rdata->bytes);
+ cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
+ __func__, rdata->subreq.start, rdata->subreq.len);
if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 12;
else {
wct = 10; /* old style read */
- if ((rdata->offset >> 32) > 0) {
+ if ((rdata->subreq.start >> 32) > 0) {
/* can not handle this big offset for old */
return -EIO;
}
@@ -1342,13 +1349,13 @@ cifs_async_readv(struct cifs_readdata *rdata)
smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
- smb->Fid = rdata->cfile->fid.netfid;
- smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
+ smb->Fid = rdata->req->cfile->fid.netfid;
+ smb->OffsetLow = cpu_to_le32(rdata->subreq.start & 0xFFFFFFFF);
if (wct == 12)
- smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
+ smb->OffsetHigh = cpu_to_le32(rdata->subreq.start >> 32);
smb->Remaining = 0;
- smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
- smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
+ smb->MaxCount = cpu_to_le16(rdata->subreq.len & 0xFFFF);
+ smb->MaxCountHigh = cpu_to_le32(rdata->subreq.len >> 16);
if (wct == 12)
smb->ByteCount = 0;
else {
@@ -1364,15 +1371,11 @@ cifs_async_readv(struct cifs_readdata *rdata)
rdata->iov[1].iov_base = (char *)smb + 4;
rdata->iov[1].iov_len = get_rfc1002_length(smb);
- kref_get(&rdata->refcount);
rc = cifs_call_async(tcon->ses->server, &rqst, cifs_readv_receive,
cifs_readv_callback, NULL, rdata, 0, NULL);
if (rc == 0)
cifs_stats_inc(&tcon->stats.cifs_stats.num_reads);
- else
- kref_put(&rdata->refcount, cifs_readdata_release);
-
cifs_small_buf_release(smb);
return rc;
}
@@ -1615,16 +1618,17 @@ CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms,
static void
cifs_writev_callback(struct mid_q_entry *mid)
{
- struct cifs_writedata *wdata = mid->callback_data;
- struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
- unsigned int written;
+ struct cifs_io_subrequest *wdata = mid->callback_data;
+ struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
struct cifs_credits credits = { .value = 1, .instance = 0 };
+ ssize_t result;
+ size_t written;
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
- wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
- if (wdata->result != 0)
+ result = cifs_check_receive(mid, tcon->ses->server, 0);
+ if (result != 0)
break;
written = le16_to_cpu(smb->CountHigh);
@@ -1636,37 +1640,37 @@ cifs_writev_callback(struct mid_q_entry *mid)
* client. OS/2 servers are known to set incorrect
* CountHigh values.
*/
- if (written > wdata->bytes)
+ if (written > wdata->subreq.len)
written &= 0xFFFF;
- if (written < wdata->bytes)
- wdata->result = -ENOSPC;
+ if (written < wdata->subreq.len)
+ result = -ENOSPC;
else
- wdata->bytes = written;
+ result = written;
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
- wdata->result = -EAGAIN;
+ result = -EAGAIN;
break;
default:
- wdata->result = -EIO;
+ result = -EIO;
break;
}
- queue_work(cifsiod_wq, &wdata->work);
+ wdata->credits.value = 0;
+ cifs_write_subrequest_terminated(wdata, result, true);
release_mid(mid);
add_credits(tcon->ses->server, &credits, 0);
}
/* cifs_async_writev - send an async write, and set up mid to handle result */
-int
-cifs_async_writev(struct cifs_writedata *wdata,
- void (*release)(struct kref *kref))
+void
+cifs_async_writev(struct cifs_io_subrequest *wdata)
{
int rc = -EACCES;
WRITE_REQ *smb = NULL;
int wct;
- struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
struct kvec iov[2];
struct smb_rqst rqst = { };
@@ -1674,9 +1678,10 @@ cifs_async_writev(struct cifs_writedata *wdata,
wct = 14;
} else {
wct = 12;
- if (wdata->offset >> 32 > 0) {
+ if (wdata->subreq.start >> 32 > 0) {
/* can not handle big offset for old srv */
- return -EIO;
+ rc = -EIO;
+ goto out;
}
}
@@ -1688,10 +1693,10 @@ cifs_async_writev(struct cifs_writedata *wdata,
smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
- smb->Fid = wdata->cfile->fid.netfid;
- smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF);
+ smb->Fid = wdata->req->cfile->fid.netfid;
+ smb->OffsetLow = cpu_to_le32(wdata->subreq.start & 0xFFFFFFFF);
if (wct == 14)
- smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32);
+ smb->OffsetHigh = cpu_to_le32(wdata->subreq.start >> 32);
smb->Reserved = 0xFFFFFFFF;
smb->WriteMode = 0;
smb->Remaining = 0;
@@ -1707,39 +1712,40 @@ cifs_async_writev(struct cifs_writedata *wdata,
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
- rqst.rq_iter = wdata->iter;
- rqst.rq_iter_size = iov_iter_count(&wdata->iter);
+ rqst.rq_iter = wdata->subreq.io_iter;
+ rqst.rq_iter_size = iov_iter_count(&wdata->subreq.io_iter);
- cifs_dbg(FYI, "async write at %llu %u bytes\n",
- wdata->offset, wdata->bytes);
+ cifs_dbg(FYI, "async write at %llu %zu bytes\n",
+ wdata->subreq.start, wdata->subreq.len);
- smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF);
- smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16);
+ smb->DataLengthLow = cpu_to_le16(wdata->subreq.len & 0xFFFF);
+ smb->DataLengthHigh = cpu_to_le16(wdata->subreq.len >> 16);
if (wct == 14) {
- inc_rfc1001_len(&smb->hdr, wdata->bytes + 1);
- put_bcc(wdata->bytes + 1, &smb->hdr);
+ inc_rfc1001_len(&smb->hdr, wdata->subreq.len + 1);
+ put_bcc(wdata->subreq.len + 1, &smb->hdr);
} else {
/* wct == 12 */
struct smb_com_writex_req *smbw =
(struct smb_com_writex_req *)smb;
- inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5);
- put_bcc(wdata->bytes + 5, &smbw->hdr);
+ inc_rfc1001_len(&smbw->hdr, wdata->subreq.len + 5);
+ put_bcc(wdata->subreq.len + 5, &smbw->hdr);
iov[1].iov_len += 4; /* pad bigger by four bytes */
}
- kref_get(&wdata->refcount);
rc = cifs_call_async(tcon->ses->server, &rqst, NULL,
cifs_writev_callback, NULL, wdata, 0, NULL);
-
+ /* Can't touch wdata if rc == 0 */
if (rc == 0)
cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
- else
- kref_put(&wdata->refcount, release);
async_writev_out:
cifs_small_buf_release(smb);
- return rc;
+out:
+ if (rc) {
+ add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
+ cifs_write_subrequest_terminated(wdata, rc, false);
+ }
}
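
The lifetime rule the write hunk adopts is worth spelling out: the kref_get()/kref_put() pair disappears because ownership of wdata transfers to the callback as soon as cifs_call_async() accepts the request (hence the "Can't touch wdata if rc == 0" comment), and failures are reported through cifs_write_subrequest_terminated() rather than a return code. A condensed sketch, assuming the names used in the hunk:

        /* Illustrative only -- not part of the patch. */
        rc = cifs_call_async(server, &rqst, NULL, cifs_writev_callback,
                             NULL, wdata, 0, NULL);
        if (rc) {
                add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
                cifs_write_subrequest_terminated(wdata, rc, false);
        }
        /* else: wdata now belongs to cifs_writev_callback() */
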
int
@@ -5854,10 +5860,8 @@ SetEARetry:
parm_data->list.EA_flags = 0;
/* we checked above that name len is less than 255 */
parm_data->list.name_len = (__u8)name_len;
- /* EA names are always ASCII */
- if (ea_name)
- strncpy(parm_data->list.name, ea_name, name_len);
- parm_data->list.name[name_len] = '\0';
+ /* EA names are always ASCII and NUL-terminated */
+ strscpy(parm_data->list.name, ea_name ?: "", name_len + 1);
parm_data->list.value_len = cpu_to_le16(ea_value_len);
/* caller ensures that ea_value_len is less than 64K but
we need to ensure that it fits within the smb */
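
The strscpy() conversion above fixes a classic strncpy() pitfall: strncpy() neither guarantees NUL termination nor reports truncation, which the old code papered over with a manual terminator. A userspace model of strscpy()'s semantics (illustrative only; the kernel version returns -E2BIG on truncation):

        #include <string.h>

        static long strscpy_model(char *dst, const char *src, size_t size)
        {
                size_t len;

                if (size == 0)
                        return -1;
                len = strnlen(src, size);
                if (len >= size) {              /* would not fit: truncate */
                        memcpy(dst, src, size - 1);
                        dst[size - 1] = '\0';
                        return -1;              /* kernel: -E2BIG */
                }
                memcpy(dst, src, len + 1);      /* includes the NUL */
                return (long)len;
        }
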
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 9b85b5341822..7a16e12f5da8 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -175,6 +175,8 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
spin_lock(&ses->chan_lock);
for (i = 0; i < ses->chan_count; i++) {
if (!ses->chans[i].server)
@@ -232,7 +234,13 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
- /* check if iface is still active */
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
+ continue;
+ }
+ spin_unlock(&ses->ses_lock);
+
spin_lock(&ses->chan_lock);
if (cifs_ses_get_chan_index(ses, server) ==
CIFS_INVAL_CHAN_INDEX) {
@@ -1860,6 +1868,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
ctx->sectype != ses->sectype)
return 0;
+ if (ctx->dfs_root_ses != ses->dfs_root_ses)
+ return 0;
+
/*
* If an existing session is limited to less channels than
* requested, it should not be reused
@@ -1932,7 +1943,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
}
/* no need to setup directory caching on IPC share, so pass in false */
- tcon = tcon_info_alloc(false);
+ tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc);
if (tcon == NULL)
return -ENOMEM;
@@ -1949,7 +1960,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
if (rc) {
cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
- tconInfoFree(tcon);
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail);
goto out;
}
@@ -1963,31 +1974,6 @@ out:
return rc;
}
-/**
- * cifs_free_ipc - helper to release the session IPC tcon
- * @ses: smb session to unmount the IPC from
- *
- * Needs to be called everytime a session is destroyed.
- *
- * On session close, the IPC is closed and the server must release all tcons of the session.
- * No need to send a tree disconnect here.
- *
- * Besides, it will make the server to not close durable and resilient files on session close, as
- * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
- */
-static int
-cifs_free_ipc(struct cifs_ses *ses)
-{
- struct cifs_tcon *tcon = ses->tcon_ipc;
-
- if (tcon == NULL)
- return 0;
-
- tconInfoFree(tcon);
- ses->tcon_ipc = NULL;
- return 0;
-}
-
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
@@ -2019,48 +2005,52 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
void __cifs_put_smb_ses(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
+ struct cifs_tcon *tcon;
unsigned int xid;
size_t i;
+ bool do_logoff;
int rc;
+ spin_lock(&cifs_tcp_ses_lock);
spin_lock(&ses->ses_lock);
- if (ses->ses_status == SES_EXITING) {
+ cifs_dbg(FYI, "%s: id=0x%llx ses_count=%d ses_status=%u ipc=%s\n",
+ __func__, ses->Suid, ses->ses_count, ses->ses_status,
+ ses->tcon_ipc ? ses->tcon_ipc->tree_name : "none");
+ if (ses->ses_status == SES_EXITING || --ses->ses_count > 0) {
spin_unlock(&ses->ses_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return;
}
- spin_unlock(&ses->ses_lock);
+ /* ses_count can never go negative */
+ WARN_ON(ses->ses_count < 0);
- cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
- cifs_dbg(FYI,
- "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
+ spin_lock(&ses->chan_lock);
+ cifs_chan_clear_need_reconnect(ses, server);
+ spin_unlock(&ses->chan_lock);
- spin_lock(&cifs_tcp_ses_lock);
- if (--ses->ses_count > 0) {
- spin_unlock(&cifs_tcp_ses_lock);
- return;
- }
- spin_lock(&ses->ses_lock);
- if (ses->ses_status == SES_GOOD)
- ses->ses_status = SES_EXITING;
+ do_logoff = ses->ses_status == SES_GOOD && server->ops->logoff;
+ ses->ses_status = SES_EXITING;
+ tcon = ses->tcon_ipc;
+ ses->tcon_ipc = NULL;
spin_unlock(&ses->ses_lock);
spin_unlock(&cifs_tcp_ses_lock);
- /* ses_count can never go negative */
- WARN_ON(ses->ses_count < 0);
-
- spin_lock(&ses->ses_lock);
- if (ses->ses_status == SES_EXITING && server->ops->logoff) {
- spin_unlock(&ses->ses_lock);
- cifs_free_ipc(ses);
+ /*
+ * On session close, the IPC is closed and the server must release all
+ * tcons of the session. No need to send a tree disconnect here.
+ *
+ * Besides, it will keep the server from closing durable and resilient
+ * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an
+ * SMB2 LOGOFF Request.
+ */
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc);
+ if (do_logoff) {
xid = get_xid();
rc = server->ops->logoff(xid, ses);
if (rc)
cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
__func__, rc);
_free_xid(xid);
- } else {
- spin_unlock(&ses->ses_lock);
- cifs_free_ipc(ses);
}
spin_lock(&cifs_tcp_ses_lock);
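
The __cifs_put_smb_ses() rework above closes a teardown race: the refcount decrement, the SES_EXITING transition and the IPC tcon detach now happen atomically under cifs_tcp_ses_lock plus ses_lock, with the logoff decision captured in do_logoff before the locks drop. Condensed ordering (illustrative comment sketch, not part of the patch):

        /*
         * lock(cifs_tcp_ses_lock); lock(ses_lock);
         *     exiting, or --ses_count > 0?  -> unlock both, return
         *     do_logoff = (status == SES_GOOD) && ops->logoff
         *     status = SES_EXITING; detach tcon_ipc
         * unlock both;
         * free the IPC tcon, then send LOGOFF if do_logoff
         */
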
@@ -2193,6 +2183,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
++delim;
+ /* BB consider adding support for password2 (Key Rotation) for multiuser in future */
ctx->password = kstrndup(delim, len, GFP_KERNEL);
if (!ctx->password) {
cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
@@ -2216,6 +2207,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
kfree(ctx->username);
ctx->username = NULL;
kfree_sensitive(ctx->password);
+ /* no need to free ctx->password2 since not allocated in this path */
ctx->password = NULL;
goto out_key_put;
}
@@ -2327,6 +2319,12 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
if (!ses->password)
goto get_ses_fail;
}
+ /* ctx->password freed at unmount */
+ if (ctx->password2) {
+ ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
+ if (!ses->password2)
+ goto get_ses_fail;
+ }
if (ctx->domainname) {
ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
if (!ses->domainName)
@@ -2373,9 +2371,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
* need to lock before changing something in the session.
*/
spin_lock(&cifs_tcp_ses_lock);
+ if (ctx->dfs_root_ses)
+ cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
ses->dfs_root_ses = ctx->dfs_root_ses;
- if (ses->dfs_root_ses)
- ses->dfs_root_ses->ses_count++;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2434,6 +2432,8 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
continue;
}
++tcon->tc_count;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_find);
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return tcon;
@@ -2443,7 +2443,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
}
void
-cifs_put_tcon(struct cifs_tcon *tcon)
+cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
unsigned int xid;
struct cifs_ses *ses;
@@ -2459,6 +2459,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
spin_lock(&tcon->tc_lock);
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count - 1, trace);
if (--tcon->tc_count > 0) {
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2495,7 +2496,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
_free_xid(xid);
cifs_fscache_release_super_cookie(tcon);
- tconInfoFree(tcon);
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free);
cifs_put_smb_ses(ses);
}
@@ -2549,7 +2550,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
nohandlecache = ctx->nohandlecache;
else
nohandlecache = true;
- tcon = tcon_info_alloc(!nohandlecache);
+ tcon = tcon_info_alloc(!nohandlecache, netfs_trace_tcon_ref_new);
if (tcon == NULL) {
rc = -ENOMEM;
goto out_fail;
@@ -2739,7 +2740,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
return tcon;
out_fail:
- tconInfoFree(tcon);
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free_fail);
return ERR_PTR(rc);
}
@@ -2756,7 +2757,7 @@ cifs_put_tlink(struct tcon_link *tlink)
}
if (!IS_ERR(tlink_tcon(tlink)))
- cifs_put_tcon(tlink_tcon(tlink));
+ cifs_put_tcon(tlink_tcon(tlink), netfs_trace_tcon_ref_put_tlink);
kfree(tlink);
}
@@ -3321,11 +3322,14 @@ void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
int rc = 0;
if (mnt_ctx->tcon)
- cifs_put_tcon(mnt_ctx->tcon);
+ cifs_put_tcon(mnt_ctx->tcon, netfs_trace_tcon_ref_put_mnt_ctx);
else if (mnt_ctx->ses)
cifs_put_smb_ses(mnt_ctx->ses);
else if (mnt_ctx->server)
cifs_put_tcp_session(mnt_ctx->server, 0);
+ mnt_ctx->ses = NULL;
+ mnt_ctx->tcon = NULL;
+ mnt_ctx->server = NULL;
mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
free_xid(mnt_ctx->xid);
}
@@ -3604,8 +3608,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
bool isdfs;
int rc;
- INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list);
-
rc = dfs_mount_share(&mnt_ctx, &isdfs);
if (rc)
goto error;
@@ -3636,7 +3638,6 @@ out:
return rc;
error:
- dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list);
cifs_mount_put_conns(&mnt_ctx);
return rc;
}
@@ -3651,6 +3652,18 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
goto error;
rc = cifs_mount_get_tcon(&mnt_ctx);
+ if (!rc) {
+ /*
+ * Prevent superblock from being created with any missing
+ * connections.
+ */
+ if (WARN_ON(!mnt_ctx.server))
+ rc = -EHOSTDOWN;
+ else if (WARN_ON(!mnt_ctx.ses))
+ rc = -EACCES;
+ else if (WARN_ON(!mnt_ctx.tcon))
+ rc = -ENOENT;
+ }
if (rc)
goto error;
@@ -3988,13 +4001,14 @@ cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
static struct cifs_tcon *
-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
struct cifs_ses *ses;
struct cifs_tcon *tcon = NULL;
struct smb3_fs_context *ctx;
+ char *origin_fullpath = NULL;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
@@ -4018,6 +4032,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
ctx->sign = master_tcon->ses->sign;
ctx->seal = master_tcon->seal;
ctx->witness = master_tcon->use_witness;
+ ctx->dfs_root_ses = master_tcon->ses->dfs_root_ses;
rc = cifs_set_vol_auth(ctx, master_tcon->ses);
if (rc) {
@@ -4037,12 +4052,39 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
goto out;
}
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ spin_lock(&master_tcon->tc_lock);
+ if (master_tcon->origin_fullpath) {
+ spin_unlock(&master_tcon->tc_lock);
+ origin_fullpath = dfs_get_path(cifs_sb, cifs_sb->ctx->source);
+ if (IS_ERR(origin_fullpath)) {
+ tcon = ERR_CAST(origin_fullpath);
+ origin_fullpath = NULL;
+ cifs_put_smb_ses(ses);
+ goto out;
+ }
+ } else {
+ spin_unlock(&master_tcon->tc_lock);
+ }
+#endif
+
tcon = cifs_get_tcon(ses, ctx);
if (IS_ERR(tcon)) {
cifs_put_smb_ses(ses);
goto out;
}
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ if (origin_fullpath) {
+ spin_lock(&tcon->tc_lock);
+ tcon->origin_fullpath = origin_fullpath;
+ spin_unlock(&tcon->tc_lock);
+ origin_fullpath = NULL;
+ queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+ dfs_cache_get_ttl() * HZ);
+ }
+#endif
+
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (cap_unix(ses))
reset_cifs_unix_caps(0, tcon, NULL, ctx);
@@ -4051,11 +4093,23 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
out:
kfree(ctx->username);
kfree_sensitive(ctx->password);
+ kfree(origin_fullpath);
kfree(ctx);
return tcon;
}
+static struct cifs_tcon *
+cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+{
+ struct cifs_tcon *ret;
+
+ cifs_mount_lock();
+ ret = __cifs_construct_tcon(cifs_sb, fsuid);
+ cifs_mount_unlock();
+ return ret;
+}
+
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c
index 449c59830039..3ec965547e3d 100644
--- a/fs/smb/client/dfs.c
+++ b/fs/smb/client/dfs.c
@@ -66,33 +66,20 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
}
/*
- * Track individual DFS referral servers used by new DFS mount.
- *
- * On success, their lifetime will be shared by final tcon (dfs_ses_list).
- * Otherwise, they will be put by dfs_put_root_smb_sessions() in cifs_mount().
+ * Get an active reference on @ses so that the next call to cifs_put_tcon()
+ * won't release it, as any new DFS referrals must go through its IPC tcon.
*/
-static int add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
- struct dfs_root_ses *root_ses;
struct cifs_ses *ses = mnt_ctx->ses;
if (ses) {
- root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL);
- if (!root_ses)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&root_ses->list);
-
spin_lock(&cifs_tcp_ses_lock);
cifs_smb_ses_inc_refcount(ses);
spin_unlock(&cifs_tcp_ses_lock);
- root_ses->ses = ses;
- list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list);
}
- /* Select new DFS referral server so that new referrals go through it */
ctx->dfs_root_ses = ses;
- return 0;
}
static inline int parse_dfs_target(struct smb3_fs_context *ctx,
@@ -185,11 +172,8 @@ again:
continue;
}
- if (is_refsrv) {
- rc = add_root_smb_session(mnt_ctx);
- if (rc)
- goto out;
- }
+ if (is_refsrv)
+ add_root_smb_session(mnt_ctx);
rc = ref_walk_advance(rw);
if (!rc) {
@@ -232,6 +216,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_tcon *tcon;
char *origin_fullpath;
+ bool new_tcon = true;
int rc;
origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
@@ -239,6 +224,18 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
return PTR_ERR(origin_fullpath);
rc = dfs_referral_walk(mnt_ctx);
+ if (!rc) {
+ /*
+ * Prevent superblock from being created with any missing
+ * connections.
+ */
+ if (WARN_ON(!mnt_ctx->server))
+ rc = -EHOSTDOWN;
+ else if (WARN_ON(!mnt_ctx->ses))
+ rc = -EACCES;
+ else if (WARN_ON(!mnt_ctx->tcon))
+ rc = -ENOENT;
+ }
if (rc)
goto out;
@@ -247,15 +244,14 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
if (!tcon->origin_fullpath) {
tcon->origin_fullpath = origin_fullpath;
origin_fullpath = NULL;
+ } else {
+ new_tcon = false;
}
spin_unlock(&tcon->tc_lock);
- if (list_empty(&tcon->dfs_ses_list)) {
- list_replace_init(&mnt_ctx->dfs_ses_list, &tcon->dfs_ses_list);
+ if (new_tcon) {
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
dfs_cache_get_ttl() * HZ);
- } else {
- dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list);
}
out:
@@ -298,7 +294,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
if (rc)
return rc;
- ctx->dfs_root_ses = mnt_ctx->ses;
/*
* If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
* try to get an DFS referral (even cached) to determine whether it is an DFS mount.
@@ -324,7 +319,9 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
*isdfs = true;
add_root_smb_session(mnt_ctx);
- return __dfs_mount_share(mnt_ctx);
+ rc = __dfs_mount_share(mnt_ctx);
+ dfs_put_root_smb_sessions(mnt_ctx);
+ return rc;
}
/* Update dfs referral path of superblock */
diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h
index 875ab7ae57fc..e5c4dcf83750 100644
--- a/fs/smb/client/dfs.h
+++ b/fs/smb/client/dfs.h
@@ -7,7 +7,9 @@
#define _CIFS_DFS_H
#include "cifsglob.h"
+#include "cifsproto.h"
#include "fs_context.h"
+#include "dfs_cache.h"
#include "cifs_unicode.h"
#include <linux/namei.h>
@@ -114,11 +116,6 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
ref_walk_tit(rw));
}
-struct dfs_root_ses {
- struct list_head list;
- struct cifs_ses *ses;
-};
-
int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
struct smb3_fs_context *ctx);
int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
@@ -133,20 +130,32 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct cifs_ses *rses = ctx->dfs_root_ses ?: mnt_ctx->ses;
- return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls,
+ return dfs_cache_find(mnt_ctx->xid, rses, cifs_sb->local_nls,
cifs_remap(cifs_sb), path, ref, tl);
}
-static inline void dfs_put_root_smb_sessions(struct list_head *head)
+/*
+ * cifs_get_smb_ses() already guarantees an active reference of
+ * @ses->dfs_root_ses when a new session is created, so we need to put the
+ * extra references taken on all DFS root sessions that were used across the
+ * mount process in dfs_mount_share().
+ */
+static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
{
- struct dfs_root_ses *root, *tmp;
+ const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_ses *ses = ctx->dfs_root_ses;
+ struct cifs_ses *cur;
+
+ if (!ses)
+ return;
- list_for_each_entry_safe(root, tmp, head, list) {
- list_del_init(&root->list);
- cifs_put_smb_ses(root->ses);
- kfree(root);
+ for (cur = ses; cur; cur = cur->dfs_root_ses) {
+ if (cur->dfs_root_ses)
+ cifs_put_smb_ses(cur->dfs_root_ses);
}
+ cifs_put_smb_ses(ses);
}
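
Each session created through the DFS referral path holds one counted reference on its dfs_root_ses parent, so the loop above releases the whole chain by putting every parent once and then the head. A safe userspace model of the same chain-put (illustrative; restructured so a node is never dereferenced after its reference is dropped):

        #include <stdlib.h>

        struct node {
                struct node *parent;    /* one counted ref per child */
                int refs;
        };

        static void put_node(struct node *n)
        {
                if (--n->refs == 0)
                        free(n);
        }

        /* Drop one reference on every node in the chain. */
        static void put_chain(struct node *head)
        {
                while (head) {
                        struct node *parent = head->parent;

                        put_node(head);
                        head = parent;
                }
        }
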
#endif /* _CIFS_DFS_H */
diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c
index 508d831fabe3..11c8efecf7aa 100644
--- a/fs/smb/client/dfs_cache.c
+++ b/fs/smb/client/dfs_cache.c
@@ -1172,8 +1172,8 @@ static bool is_ses_good(struct cifs_ses *ses)
return ret;
}
-/* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
+/* Refresh dfs referral of @ses and mark it for reconnect if needed */
+static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
{
struct TCP_Server_Info *server = ses->server;
DFS_CACHE_TGT_LIST(old_tl);
@@ -1181,10 +1181,21 @@ static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_ref
bool needs_refresh = false;
struct cache_entry *ce;
unsigned int xid;
+ char *path = NULL;
int rc = 0;
xid = get_xid();
+ mutex_lock(&server->refpath_lock);
+ if (server->leaf_fullpath) {
+ path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
+ if (!path)
+ rc = -ENOMEM;
+ }
+ mutex_unlock(&server->refpath_lock);
+ if (!path)
+ goto out;
+
down_read(&htable_rw_lock);
ce = lookup_cache_entry(path);
needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
@@ -1218,19 +1229,17 @@ out:
free_xid(xid);
dfs_cache_free_tgts(&old_tl);
dfs_cache_free_tgts(&new_tl);
- return rc;
+ kfree(path);
}
-static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
+static inline void refresh_ses_referral(struct cifs_ses *ses)
{
- struct TCP_Server_Info *server = tcon->ses->server;
- struct cifs_ses *ses = tcon->ses;
+ __refresh_ses_referral(ses, false);
+}
- mutex_lock(&server->refpath_lock);
- if (server->leaf_fullpath)
- __refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
- mutex_unlock(&server->refpath_lock);
- return 0;
+static inline void force_refresh_ses_referral(struct cifs_ses *ses)
+{
+ __refresh_ses_referral(ses, true);
}
/**
@@ -1271,34 +1280,20 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
*/
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
- return refresh_tcon(tcon, true);
+ force_refresh_ses_referral(tcon->ses);
+ return 0;
}
/* Refresh all DFS referrals related to DFS tcon */
void dfs_cache_refresh(struct work_struct *work)
{
- struct TCP_Server_Info *server;
- struct dfs_root_ses *rses;
struct cifs_tcon *tcon;
struct cifs_ses *ses;
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
- ses = tcon->ses;
- server = ses->server;
- mutex_lock(&server->refpath_lock);
- if (server->leaf_fullpath)
- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
- mutex_unlock(&server->refpath_lock);
-
- list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
- ses = rses->ses;
- server = ses->server;
- mutex_lock(&server->refpath_lock);
- if (server->leaf_fullpath)
- __refresh_tcon(server->leaf_fullpath + 1, ses, false);
- mutex_unlock(&server->refpath_lock);
- }
+ for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+ refresh_ses_referral(ses);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
atomic_read(&dfs_cache_ttl) * HZ);
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index 89333d9bce36..864b194dbaa0 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -189,6 +189,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
int disposition;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
+ int rdwr_for_fscache = 0;
*oplock = 0;
if (tcon->ses->server->oplocks)
@@ -200,6 +201,10 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
return PTR_ERR(full_path);
}
+ /* If we're caching, we need to be able to fill in around partial writes. */
+ if (cifs_fscache_enabled(inode) && (oflags & O_ACCMODE) == O_WRONLY)
+ rdwr_for_fscache = 1;
+
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
@@ -276,6 +281,8 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
desired_access |= GENERIC_READ; /* is this too little? */
if (OPEN_FMODE(oflags) & FMODE_WRITE)
desired_access |= GENERIC_WRITE;
+ if (rdwr_for_fscache == 1)
+ desired_access |= GENERIC_READ;
disposition = FILE_OVERWRITE_IF;
if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
@@ -304,6 +311,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
+retry_open:
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
@@ -317,8 +325,15 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
rc = server->ops->open(xid, &oparms, oplock, buf);
if (rc) {
cifs_dbg(FYI, "cifs_create returned 0x%x\n", rc);
+ if (rc == -EACCES && rdwr_for_fscache == 1) {
+ desired_access &= ~GENERIC_READ;
+ rdwr_for_fscache = 2;
+ goto retry_open;
+ }
goto out;
}
+ if (rdwr_for_fscache == 2)
+ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
@@ -612,11 +627,18 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode,
goto mknod_out;
}
+ trace_smb3_mknod_enter(xid, tcon->ses->Suid, tcon->tid, full_path);
+
rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon,
full_path, mode,
device_number);
mknod_out:
+ if (rc)
+ trace_smb3_mknod_err(xid, tcon->ses->Suid, tcon->tid, rc);
+ else
+ trace_smb3_mknod_done(xid, tcon->ses->Suid, tcon->tid);
+
free_dentry_path(page);
free_xid(xid);
cifs_put_tlink(tlink);
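
The rdwr_for_fscache logic added to cifs_do_create() here (and to cifs_nt_open() and cifs_reopen_file() in file.c below) is a small tri-state: 0 means no cache widening, 1 means a write-only open was widened with GENERIC_READ so fscache can fill in around partial writes, and 2 means the server refused the widened open and we fell back to write-only. The retry skeleton, condensed from the hunks (illustrative, not a literal quote of any one function):

        int rdwr_for_fscache = 0;

        if (cifs_fscache_enabled(inode) && (oflags & O_ACCMODE) == O_WRONLY)
                rdwr_for_fscache = 1;           /* widen to R+W for the cache */
        /* ... build oparms ... */
retry_open:
        rc = server->ops->open(xid, &oparms, oplock, buf);
        if (rc == -EACCES && rdwr_for_fscache == 1) {
                desired_access &= ~GENERIC_READ;
                rdwr_for_fscache = 2;           /* retry write-only */
                goto retry_open;
        }
        if (!rc && rdwr_for_fscache == 2)
                cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
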
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 16aadce492b2..9d38294a7e68 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -36,132 +36,322 @@
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
+#include <trace/events/netfs.h>
+
+static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
/*
- * Remove the dirty flags from a span of pages.
+ * Prepare a subrequest to upload to the server. We need to allocate credits
+ * so that we know the maximum amount of data that we can include in it.
*/
-static void cifs_undirty_folios(struct inode *inode, loff_t start, unsigned int len)
+static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
- struct address_space *mapping = inode->i_mapping;
- struct folio *folio;
- pgoff_t end;
+ struct cifs_io_subrequest *wdata =
+ container_of(subreq, struct cifs_io_subrequest, subreq);
+ struct cifs_io_request *req = wdata->req;
+ struct TCP_Server_Info *server;
+ struct cifsFileInfo *open_file = req->cfile;
+ size_t wsize = req->rreq.wsize;
+ int rc;
- XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
+ if (!wdata->have_xid) {
+ wdata->xid = get_xid();
+ wdata->have_xid = true;
+ }
- rcu_read_lock();
+ server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
+ wdata->server = server;
- end = (start + len - 1) / PAGE_SIZE;
- xas_for_each_marked(&xas, folio, end, PAGECACHE_TAG_DIRTY) {
- if (xas_retry(&xas, folio))
- continue;
- xas_pause(&xas);
- rcu_read_unlock();
- folio_lock(folio);
- folio_clear_dirty_for_io(folio);
- folio_unlock(folio);
- rcu_read_lock();
+retry:
+ if (open_file->invalidHandle) {
+ rc = cifs_reopen_file(open_file, false);
+ if (rc < 0) {
+ if (rc == -EAGAIN)
+ goto retry;
+ subreq->error = rc;
+ return netfs_prepare_write_failed(subreq);
+ }
+ }
+
+ rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
+ &wdata->credits);
+ if (rc < 0) {
+ subreq->error = rc;
+ return netfs_prepare_write_failed(subreq);
}
- rcu_read_unlock();
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->smbd_conn)
+ subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+#endif
}
/*
- * Completion of write to server.
+ * Issue a subrequest to upload to the server.
*/
-void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len)
+static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
- struct address_space *mapping = inode->i_mapping;
- struct folio *folio;
- pgoff_t end;
+ struct cifs_io_subrequest *wdata =
+ container_of(subreq, struct cifs_io_subrequest, subreq);
+ struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
+ int rc;
+
+ if (cifs_forced_shutdown(sbi)) {
+ rc = -EIO;
+ goto fail;
+ }
- XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
+ rc = adjust_credits(wdata->server, &wdata->credits, wdata->subreq.len);
+ if (rc)
+ goto fail;
- if (!len)
- return;
+ rc = -EAGAIN;
+ if (wdata->req->cfile->invalidHandle)
+ goto fail;
- rcu_read_lock();
+ wdata->server->ops->async_writev(wdata);
+out:
+ return;
+
+fail:
+ if (rc == -EAGAIN)
+ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
+ else
+ trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
+ add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
+ cifs_write_subrequest_terminated(wdata, rc, false);
+ goto out;
+}
+
+/*
+ * Split the read up according to how many credits we can get for each piece.
+ * It's okay to sleep here if we need to wait for more credit to become
+ * available.
+ *
+ * We also choose the server and allocate an operation ID to be cleaned up
+ * later.
+ */
+static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct TCP_Server_Info *server;
+ struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+ struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+ size_t rsize = 0;
+ int rc;
+
+ rdata->xid = get_xid();
+ rdata->have_xid = true;
+
+ server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
+ rdata->server = server;
+
+ if (cifs_sb->ctx->rsize == 0)
+ cifs_sb->ctx->rsize =
+ server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
+ cifs_sb->ctx);
- end = (start + len - 1) / PAGE_SIZE;
- xas_for_each(&xas, folio, end) {
- if (xas_retry(&xas, folio))
- continue;
- if (!folio_test_writeback(folio)) {
- WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio->index, end);
- continue;
- }
- folio_detach_private(folio);
- folio_end_writeback(folio);
+ rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
+ &rdata->credits);
+ if (rc) {
+ subreq->error = rc;
+ return false;
}
- rcu_read_unlock();
+ subreq->len = min_t(size_t, subreq->len, rsize);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->smbd_conn)
+ subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
+#endif
+ return true;
}
/*
- * Failure of write to server.
+ * Issue a read operation on behalf of the netfs helper functions. We're asked
+ * to make a read of a certain size at a point in the file. We are permitted
+ * to only read a portion of that, but as long as we read something, the netfs
+ * helper will call us again so that we can issue another read.
*/
-void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len)
+static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
{
- struct address_space *mapping = inode->i_mapping;
- struct folio *folio;
- pgoff_t end;
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
+ struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+ pid_t pid;
+ int rc = 0;
- XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+ pid = req->cfile->pid;
+ else
+ pid = current->tgid; // FIXME: this may run from a workqueue, not the user task
- if (!len)
- return;
+ cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
+ __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
+ subreq->transferred, subreq->len);
- rcu_read_lock();
+ if (req->cfile->invalidHandle) {
+ do {
+ rc = cifs_reopen_file(req->cfile, true);
+ } while (rc == -EAGAIN);
+ if (rc)
+ goto out;
+ }
- end = (start + len - 1) / PAGE_SIZE;
- xas_for_each(&xas, folio, end) {
- if (xas_retry(&xas, folio))
- continue;
- if (!folio_test_writeback(folio)) {
- WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio->index, end);
- continue;
- }
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ rdata->pid = pid;
- folio_set_error(folio);
- folio_end_writeback(folio);
+ rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len);
+ if (!rc) {
+ if (rdata->req->cfile->invalidHandle)
+ rc = -EAGAIN;
+ else
+ rc = rdata->server->ops->async_readv(rdata);
}
- rcu_read_unlock();
+out:
+ if (rc)
+ netfs_subreq_terminated(subreq, rc, false);
}
/*
- * Redirty pages after a temporary failure.
+ * Writeback calls this when it finds a folio that needs uploading. This isn't
+ * called if writeback only has copy-to-cache to deal with.
*/
-void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len)
+static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
- struct address_space *mapping = inode->i_mapping;
- struct folio *folio;
- pgoff_t end;
-
- XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
+ struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
+ int ret;
- if (!len)
+ ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
+ if (ret) {
+ cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
return;
+ }
- rcu_read_lock();
+ wreq->io_streams[0].avail = true;
+}
- end = (start + len - 1) / PAGE_SIZE;
- xas_for_each(&xas, folio, end) {
- if (!folio_test_writeback(folio)) {
- WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio->index, end);
- continue;
- }
+/*
+ * Initialise a request.
+ */
+static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+ struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
+ struct cifsFileInfo *open_file = NULL;
+
+ rreq->rsize = cifs_sb->ctx->rsize;
+ rreq->wsize = cifs_sb->ctx->wsize;
+
+ if (file) {
+ open_file = file->private_data;
+ rreq->netfs_priv = file->private_data;
+ req->cfile = cifsFileInfo_get(open_file);
+ } else if (rreq->origin != NETFS_WRITEBACK) {
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Expand the readahead window to a multiple of rsize, provided rsize is at
+ * least a page in size, allowing for the possibility that rsize is not a
+ * power of two.
+ */
+static void cifs_expand_readahead(struct netfs_io_request *rreq)
+{
+ unsigned int rsize = rreq->rsize;
+ loff_t misalignment, i_size = i_size_read(rreq->inode);
- filemap_dirty_folio(folio->mapping, folio);
- folio_end_writeback(folio);
+ if (rsize < PAGE_SIZE)
+ return;
+
+ if (rsize < INT_MAX)
+ rsize = roundup_pow_of_two(rsize);
+ else
+ rsize = ((unsigned int)INT_MAX + 1) / 2;
+
+ misalignment = rreq->start & (rsize - 1);
+ if (misalignment) {
+ rreq->start -= misalignment;
+ rreq->len += misalignment;
}
- rcu_read_unlock();
+ rreq->len = round_up(rreq->len, rsize);
+ if (rreq->start < i_size && rreq->len > i_size - rreq->start)
+ rreq->len = i_size - rreq->start;
+}
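
The expansion above is standard power-of-two window alignment: round the start down to an rsize boundary, grow the length by the misalignment, then round the length up. A runnable userspace model of the arithmetic (illustrative only):

        #include <assert.h>
        #include <stdint.h>

        static void expand(uint64_t *start, uint64_t *len, uint64_t chunk)
        {
                uint64_t misalign = *start & (chunk - 1);   /* chunk is 2^n */

                *start -= misalign;
                *len = (*len + misalign + chunk - 1) & ~(chunk - 1);
        }

        int main(void)
        {
                uint64_t s = 3000, l = 5000;

                expand(&s, &l, 4096);
                assert(s == 0 && l == 8192);    /* window covers whole chunks */
                return 0;
        }
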
+
+/*
+ * Completion of a request operation.
+ */
+static void cifs_rreq_done(struct netfs_io_request *rreq)
+{
+ struct timespec64 atime, mtime;
+ struct inode *inode = rreq->inode;
+
+ /* we do not want atime to be less than mtime; that broke some apps */
+ atime = inode_set_atime_to_ts(inode, current_time(inode));
+ mtime = inode_get_mtime(inode);
+ if (timespec64_compare(&atime, &mtime))
+ inode_set_atime_to_ts(inode, inode_get_mtime(inode));
+}
+
+static void cifs_post_modify(struct inode *inode)
+{
+ /* Indication to update ctime and mtime as close is deferred */
+ set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
}
+static void cifs_free_request(struct netfs_io_request *rreq)
+{
+ struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
+
+ if (req->cfile)
+ cifsFileInfo_put(req->cfile);
+}
+
+static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
+{
+ struct cifs_io_subrequest *rdata =
+ container_of(subreq, struct cifs_io_subrequest, subreq);
+ int rc = subreq->error;
+
+ if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (rdata->mr) {
+ smbd_deregister_mr(rdata->mr);
+ rdata->mr = NULL;
+ }
+#endif
+ }
+
+ add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
+ if (rdata->have_xid)
+ free_xid(rdata->xid);
+}
+
+const struct netfs_request_ops cifs_req_ops = {
+ .request_pool = &cifs_io_request_pool,
+ .subrequest_pool = &cifs_io_subrequest_pool,
+ .init_request = cifs_init_request,
+ .free_request = cifs_free_request,
+ .free_subrequest = cifs_free_subrequest,
+ .expand_readahead = cifs_expand_readahead,
+ .clamp_length = cifs_clamp_length,
+ .issue_read = cifs_req_issue_read,
+ .done = cifs_rreq_done,
+ .post_modify = cifs_post_modify,
+ .begin_writeback = cifs_begin_writeback,
+ .prepare_write = cifs_prepare_write,
+ .issue_write = cifs_issue_write,
+};
+
/*
* Mark as invalid, all open files on tree connections since they
* were closed when session to server was lost.
@@ -206,12 +396,12 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
*/
}
-static inline int cifs_convert_flags(unsigned int flags)
+static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
if ((flags & O_ACCMODE) == O_RDONLY)
return GENERIC_READ;
else if ((flags & O_ACCMODE) == O_WRONLY)
- return GENERIC_WRITE;
+ return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
else if ((flags & O_ACCMODE) == O_RDWR) {
/* GENERIC_ALL is too much permission to request
can cause unnecessary access denied on create */
@@ -348,11 +538,16 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
int create_options = CREATE_NOT_DIR;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
+ int rdwr_for_fscache = 0;
if (!server->ops->open)
return -ENOSYS;
- desired_access = cifs_convert_flags(f_flags);
+ /* If we're caching, we need to be able to fill in around partial writes. */
+ if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
+ rdwr_for_fscache = 1;
+
+ desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
/*********************************************************************
* open flag mapping table:
@@ -389,6 +584,7 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
if (f_flags & O_DIRECT)
create_options |= CREATE_NO_BUFFER;
+retry_open:
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
@@ -400,8 +596,16 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
};
rc = server->ops->open(xid, &oparms, oplock, buf);
- if (rc)
+ if (rc) {
+ if (rc == -EACCES && rdwr_for_fscache == 1) {
+ desired_access = cifs_convert_flags(f_flags, 0);
+ rdwr_for_fscache = 2;
+ goto retry_open;
+ }
return rc;
+ }
+ if (rdwr_for_fscache == 2)
+ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
/* TODO: Add support for calling posix query info but with passing in fid */
if (tcon->unix_ext)
@@ -445,6 +649,7 @@ cifs_down_write(struct rw_semaphore *sem)
}
static void cifsFileInfo_put_work(struct work_struct *work);
+void serverclose_work(struct work_struct *work);
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct tcon_link *tlink, __u32 oplock,
@@ -491,6 +696,7 @@ struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
INIT_WORK(&cfile->put, cifsFileInfo_put_work);
+ INIT_WORK(&cfile->serverclose, serverclose_work);
INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
mutex_init(&cfile->fh_mutex);
spin_lock_init(&cfile->file_info_lock);
@@ -582,6 +788,40 @@ static void cifsFileInfo_put_work(struct work_struct *work)
cifsFileInfo_put_final(cifs_file);
}
+void serverclose_work(struct work_struct *work)
+{
+ struct cifsFileInfo *cifs_file = container_of(work,
+ struct cifsFileInfo, serverclose);
+
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
+
+ struct TCP_Server_Info *server = tcon->ses->server;
+ int rc = 0;
+ int retries = 0;
+ int MAX_RETRIES = 4;
+
+ do {
+ if (server->ops->close_getattr)
+ rc = server->ops->close_getattr(0, tcon, cifs_file);
+ else if (server->ops->close)
+ rc = server->ops->close(0, tcon, &cifs_file->fid);
+
+ if (rc == -EBUSY || rc == -EAGAIN) {
+ retries++;
+ msleep(250);
+ }
+ } while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));
+
+ if (retries == MAX_RETRIES)
+ pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
+
+ if (cifs_file->offload)
+ queue_work(fileinfo_put_wq, &cifs_file->put);
+ else
+ cifsFileInfo_put_final(cifs_file);
+}
+
/**
* cifsFileInfo_put - release a reference of file priv data
*
@@ -622,10 +862,13 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
struct cifs_fid fid = {};
struct cifs_pending_open open;
bool oplock_break_cancelled;
+ bool serverclose_offloaded = false;
spin_lock(&tcon->open_file_lock);
spin_lock(&cifsi->open_file_lock);
spin_lock(&cifs_file->file_info_lock);
+
+ cifs_file->offload = offload;
if (--cifs_file->count > 0) {
spin_unlock(&cifs_file->file_info_lock);
spin_unlock(&cifsi->open_file_lock);
@@ -667,13 +910,20 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
struct TCP_Server_Info *server = tcon->ses->server;
unsigned int xid;
+ int rc = 0;
xid = get_xid();
if (server->ops->close_getattr)
- server->ops->close_getattr(xid, tcon, cifs_file);
+ rc = server->ops->close_getattr(xid, tcon, cifs_file);
else if (server->ops->close)
- server->ops->close(xid, tcon, &cifs_file->fid);
+ rc = server->ops->close(xid, tcon, &cifs_file->fid);
_free_xid(xid);
+
+ if (rc == -EBUSY || rc == -EAGAIN) {
+ // Server close failed, so offload it as an async op
+ queue_work(serverclose_wq, &cifs_file->serverclose);
+ serverclose_offloaded = true;
+ }
}
if (oplock_break_cancelled)
@@ -681,10 +931,15 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
cifs_del_pending_open(&open);
- if (offload)
- queue_work(fileinfo_put_wq, &cifs_file->put);
- else
- cifsFileInfo_put_final(cifs_file);
+ // If the server close was offloaded to the workqueue (on failure), the
+ // worker handles the final put as well. If it was not offloaded, we
+ // need to queue or perform the put here.
+ if (!serverclose_offloaded) {
+ if (offload)
+ queue_work(fileinfo_put_wq, &cifs_file->put);
+ else
+ cifsFileInfo_put_final(cifs_file);
+ }
}
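
The put path above now has two possible workqueue hand-offs: a close that the server bounced with -EBUSY or -EAGAIN goes to serverclose_wq, whose worker also performs the final put; otherwise the put proceeds inline or via fileinfo_put_wq as before. Decision sketch (illustrative and condensed; the real function does more bookkeeping in between):

        if (rc == -EBUSY || rc == -EAGAIN) {
                queue_work(serverclose_wq, &cifs_file->serverclose);
                return;                 /* the worker drops the last ref */
        }
        if (offload)
                queue_work(fileinfo_put_wq, &cifs_file->put);
        else
                cifsFileInfo_put_final(cifs_file);
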
int cifs_open(struct inode *inode, struct file *file)
@@ -834,11 +1089,11 @@ int cifs_open(struct inode *inode, struct file *file)
use_cache:
fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
file->f_mode & FMODE_WRITE);
- if (file->f_flags & O_DIRECT &&
- (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
- file->f_flags & O_APPEND))
- cifs_invalidate_cache(file_inode(file),
- FSCACHE_INVAL_DIO_WRITE);
+ if (!(file->f_flags & O_DIRECT))
+ goto out;
+ if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
+ goto out;
+ cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
out:
free_dentry_path(page);
@@ -903,6 +1158,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
int disposition = FILE_OPEN;
int create_options = CREATE_NOT_DIR;
struct cifs_open_parms oparms;
+ int rdwr_for_fscache = 0;
xid = get_xid();
mutex_lock(&cfile->fh_mutex);
@@ -966,7 +1222,11 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
- desired_access = cifs_convert_flags(cfile->f_flags);
+ /* If we're caching, we need to be able to fill in around partial writes. */
+ if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
+ rdwr_for_fscache = 1;
+
+ desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
/* O_SYNC also has bit for O_DSYNC so following check picks up either */
if (cfile->f_flags & O_SYNC)
@@ -978,6 +1238,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
+retry_open:
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
@@ -1003,6 +1264,11 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
/* indicate that we need to relock the file */
oparms.reconnect = true;
}
+ if (rc == -EACCES && rdwr_for_fscache == 1) {
+ desired_access = cifs_convert_flags(cfile->f_flags, 0);
+ rdwr_for_fscache = 2;
+ goto retry_open;
+ }
if (rc) {
mutex_unlock(&cfile->fh_mutex);
@@ -1011,6 +1277,9 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
goto reopen_error_exit;
}
+ if (rdwr_for_fscache == 2)
+ cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
+
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
@@ -2128,102 +2397,20 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
return rc;
}
-/*
- * update the file size (if needed) after a write. Should be called with
- * the inode->i_lock held
- */
-void
-cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
- unsigned int bytes_written)
+void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
+ bool was_async)
{
- loff_t end_of_write = offset + bytes_written;
-
- if (end_of_write > cifsi->netfs.remote_i_size)
- netfs_resize_file(&cifsi->netfs, end_of_write, true);
-}
-
-static ssize_t
-cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
- size_t write_size, loff_t *offset)
-{
- int rc = 0;
- unsigned int bytes_written = 0;
- unsigned int total_written;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
- unsigned int xid;
- struct dentry *dentry = open_file->dentry;
- struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
- struct cifs_io_parms io_parms = {0};
-
- cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
- write_size, *offset, dentry);
+ struct netfs_io_request *wreq = wdata->rreq;
+ loff_t new_server_eof;
- tcon = tlink_tcon(open_file->tlink);
- server = tcon->ses->server;
-
- if (!server->ops->sync_write)
- return -ENOSYS;
-
- xid = get_xid();
+ if (result > 0) {
+ new_server_eof = wdata->subreq.start + wdata->subreq.transferred + result;
- for (total_written = 0; write_size > total_written;
- total_written += bytes_written) {
- rc = -EAGAIN;
- while (rc == -EAGAIN) {
- struct kvec iov[2];
- unsigned int len;
-
- if (open_file->invalidHandle) {
- /* we could deadlock if we called
- filemap_fdatawait from here so tell
- reopen_file not to flush data to
- server now */
- rc = cifs_reopen_file(open_file, false);
- if (rc != 0)
- break;
- }
-
- len = min(server->ops->wp_retry_size(d_inode(dentry)),
- (unsigned int)write_size - total_written);
- /* iov[0] is reserved for smb header */
- iov[1].iov_base = (char *)write_data + total_written;
- iov[1].iov_len = len;
- io_parms.pid = pid;
- io_parms.tcon = tcon;
- io_parms.offset = *offset;
- io_parms.length = len;
- rc = server->ops->sync_write(xid, &open_file->fid,
- &io_parms, &bytes_written, iov, 1);
- }
- if (rc || (bytes_written == 0)) {
- if (total_written)
- break;
- else {
- free_xid(xid);
- return rc;
- }
- } else {
- spin_lock(&d_inode(dentry)->i_lock);
- cifs_update_eof(cifsi, *offset, bytes_written);
- spin_unlock(&d_inode(dentry)->i_lock);
- *offset += bytes_written;
- }
+ if (new_server_eof > netfs_inode(wreq->inode)->remote_i_size)
+ netfs_resize_file(netfs_inode(wreq->inode), new_server_eof, true);
}
- cifs_stats_bytes_written(tcon, total_written);
-
- if (total_written > 0) {
- spin_lock(&d_inode(dentry)->i_lock);
- if (*offset > d_inode(dentry)->i_size) {
- i_size_write(d_inode(dentry), *offset);
- d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
- }
- spin_unlock(&d_inode(dentry)->i_lock);
- }
- mark_inode_dirty_sync(d_inode(dentry));
- free_xid(xid);
- return total_written;
+ netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
}
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
@@ -2430,737 +2617,9 @@ cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
return -ENOENT;
}
-void
-cifs_writedata_release(struct kref *refcount)
-{
- struct cifs_writedata *wdata = container_of(refcount,
- struct cifs_writedata, refcount);
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (wdata->mr) {
- smbd_deregister_mr(wdata->mr);
- wdata->mr = NULL;
- }
-#endif
-
- if (wdata->cfile)
- cifsFileInfo_put(wdata->cfile);
-
- kfree(wdata);
-}
-
-/*
- * Write failed with a retryable error. Resend the write request. It's also
- * possible that the page was redirtied so re-clean the page.
- */
-static void
-cifs_writev_requeue(struct cifs_writedata *wdata)
-{
- int rc = 0;
- struct inode *inode = d_inode(wdata->cfile->dentry);
- struct TCP_Server_Info *server;
- unsigned int rest_len = wdata->bytes;
- loff_t fpos = wdata->offset;
-
- server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- do {
- struct cifs_writedata *wdata2;
- unsigned int wsize, cur_len;
-
- wsize = server->ops->wp_retry_size(inode);
- if (wsize < rest_len) {
- if (wsize < PAGE_SIZE) {
- rc = -EOPNOTSUPP;
- break;
- }
- cur_len = min(round_down(wsize, PAGE_SIZE), rest_len);
- } else {
- cur_len = rest_len;
- }
-
- wdata2 = cifs_writedata_alloc(cifs_writev_complete);
- if (!wdata2) {
- rc = -ENOMEM;
- break;
- }
-
- wdata2->sync_mode = wdata->sync_mode;
- wdata2->offset = fpos;
- wdata2->bytes = cur_len;
- wdata2->iter = wdata->iter;
-
- iov_iter_advance(&wdata2->iter, fpos - wdata->offset);
- iov_iter_truncate(&wdata2->iter, wdata2->bytes);
-
- if (iov_iter_is_xarray(&wdata2->iter))
- /* Check for pages having been redirtied and clean
- * them. We can do this by walking the xarray. If
- * it's not an xarray, then it's a DIO and we shouldn't
- * be mucking around with the page bits.
- */
- cifs_undirty_folios(inode, fpos, cur_len);
-
- rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
- &wdata2->cfile);
- if (!wdata2->cfile) {
- cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
- rc);
- if (!is_retryable_error(rc))
- rc = -EBADF;
- } else {
- wdata2->pid = wdata2->cfile->pid;
- rc = server->ops->async_writev(wdata2,
- cifs_writedata_release);
- }
-
- kref_put(&wdata2->refcount, cifs_writedata_release);
- if (rc) {
- if (is_retryable_error(rc))
- continue;
- fpos += cur_len;
- rest_len -= cur_len;
- break;
- }
-
- fpos += cur_len;
- rest_len -= cur_len;
- } while (rest_len > 0);
-
- /* Clean up remaining pages from the original wdata */
- if (iov_iter_is_xarray(&wdata->iter))
- cifs_pages_write_failed(inode, fpos, rest_len);
-
- if (rc != 0 && !is_retryable_error(rc))
- mapping_set_error(inode->i_mapping, rc);
- kref_put(&wdata->refcount, cifs_writedata_release);
-}
-
-void
-cifs_writev_complete(struct work_struct *work)
-{
- struct cifs_writedata *wdata = container_of(work,
- struct cifs_writedata, work);
- struct inode *inode = d_inode(wdata->cfile->dentry);
-
- if (wdata->result == 0) {
- spin_lock(&inode->i_lock);
- cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
- spin_unlock(&inode->i_lock);
- cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
- wdata->bytes);
- } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
- return cifs_writev_requeue(wdata);
-
- if (wdata->result == -EAGAIN)
- cifs_pages_write_redirty(inode, wdata->offset, wdata->bytes);
- else if (wdata->result < 0)
- cifs_pages_write_failed(inode, wdata->offset, wdata->bytes);
- else
- cifs_pages_written_back(inode, wdata->offset, wdata->bytes);
-
- if (wdata->result != -EAGAIN)
- mapping_set_error(inode->i_mapping, wdata->result);
- kref_put(&wdata->refcount, cifs_writedata_release);
-}
-
-struct cifs_writedata *cifs_writedata_alloc(work_func_t complete)
-{
- struct cifs_writedata *wdata;
-
- wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
- if (wdata != NULL) {
- kref_init(&wdata->refcount);
- INIT_LIST_HEAD(&wdata->list);
- init_completion(&wdata->done);
- INIT_WORK(&wdata->work, complete);
- }
- return wdata;
-}
-
-static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
-{
- struct address_space *mapping = page->mapping;
- loff_t offset = (loff_t)page->index << PAGE_SHIFT;
- char *write_data;
- int rc = -EFAULT;
- int bytes_written = 0;
- struct inode *inode;
- struct cifsFileInfo *open_file;
-
- if (!mapping || !mapping->host)
- return -EFAULT;
-
- inode = page->mapping->host;
-
- offset += (loff_t)from;
- write_data = kmap(page);
- write_data += from;
-
- if ((to > PAGE_SIZE) || (from > to)) {
- kunmap(page);
- return -EIO;
- }
-
- /* racing with truncate? */
- if (offset > mapping->host->i_size) {
- kunmap(page);
- return 0; /* don't care */
- }
-
- /* check to make sure that we are not extending the file */
- if (mapping->host->i_size - offset < (loff_t)to)
- to = (unsigned)(mapping->host->i_size - offset);
-
- rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
- &open_file);
- if (!rc) {
- bytes_written = cifs_write(open_file, open_file->pid,
- write_data, to - from, &offset);
- cifsFileInfo_put(open_file);
- /* Does mm or vfs already set times? */
- simple_inode_init_ts(inode);
- if ((bytes_written > 0) && (offset))
- rc = 0;
- else if (bytes_written < 0)
- rc = bytes_written;
- else
- rc = -EFAULT;
- } else {
- cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
- if (!is_retryable_error(rc))
- rc = -EIO;
- }
-
- kunmap(page);
- return rc;
-}
-
-/*
- * Extend the region to be written back to include subsequent contiguously
- * dirty pages if possible, but don't sleep while doing so.
- */
-static void cifs_extend_writeback(struct address_space *mapping,
- struct xa_state *xas,
- long *_count,
- loff_t start,
- int max_pages,
- loff_t max_len,
- size_t *_len)
-{
- struct folio_batch batch;
- struct folio *folio;
- unsigned int nr_pages;
- pgoff_t index = (start + *_len) / PAGE_SIZE;
- size_t len;
- bool stop = true;
- unsigned int i;
-
- folio_batch_init(&batch);
-
- do {
- /* Firstly, we gather up a batch of contiguous dirty pages
- * under the RCU read lock - but we can't clear the dirty flags
- * there if any of those pages are mapped.
- */
- rcu_read_lock();
-
- xas_for_each(xas, folio, ULONG_MAX) {
- stop = true;
- if (xas_retry(xas, folio))
- continue;
- if (xa_is_value(folio))
- break;
- if (folio->index != index) {
- xas_reset(xas);
- break;
- }
-
- if (!folio_try_get_rcu(folio)) {
- xas_reset(xas);
- continue;
- }
- nr_pages = folio_nr_pages(folio);
- if (nr_pages > max_pages) {
- xas_reset(xas);
- break;
- }
-
- /* Has the page moved or been split? */
- if (unlikely(folio != xas_reload(xas))) {
- folio_put(folio);
- xas_reset(xas);
- break;
- }
-
- if (!folio_trylock(folio)) {
- folio_put(folio);
- xas_reset(xas);
- break;
- }
- if (!folio_test_dirty(folio) ||
- folio_test_writeback(folio)) {
- folio_unlock(folio);
- folio_put(folio);
- xas_reset(xas);
- break;
- }
-
- max_pages -= nr_pages;
- len = folio_size(folio);
- stop = false;
-
- index += nr_pages;
- *_count -= nr_pages;
- *_len += len;
- if (max_pages <= 0 || *_len >= max_len || *_count <= 0)
- stop = true;
-
- if (!folio_batch_add(&batch, folio))
- break;
- if (stop)
- break;
- }
-
- xas_pause(xas);
- rcu_read_unlock();
-
- /* Now, if we obtained any pages, we can shift them to being
- * writable and mark them for caching.
- */
- if (!folio_batch_count(&batch))
- break;
-
- for (i = 0; i < folio_batch_count(&batch); i++) {
- folio = batch.folios[i];
- /* The folio should be locked, dirty and not undergoing
- * writeback from the loop above.
- */
- if (!folio_clear_dirty_for_io(folio))
- WARN_ON(1);
- folio_start_writeback(folio);
- folio_unlock(folio);
- }
-
- folio_batch_release(&batch);
- cond_resched();
- } while (!stop);
-}
-
-/*
- * Write back the locked page and any subsequent non-locked dirty pages.
- */
-static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
- struct writeback_control *wbc,
- struct xa_state *xas,
- struct folio *folio,
- unsigned long long start,
- unsigned long long end)
-{
- struct inode *inode = mapping->host;
- struct TCP_Server_Info *server;
- struct cifs_writedata *wdata;
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifs_credits credits_on_stack;
- struct cifs_credits *credits = &credits_on_stack;
- struct cifsFileInfo *cfile = NULL;
- unsigned long long i_size = i_size_read(inode), max_len;
- unsigned int xid, wsize;
- size_t len = folio_size(folio);
- long count = wbc->nr_to_write;
- int rc;
-
- /* The folio should be locked, dirty and not undergoing writeback. */
- if (!folio_clear_dirty_for_io(folio))
- WARN_ON_ONCE(1);
- folio_start_writeback(folio);
-
- count -= folio_nr_pages(folio);
-
- xid = get_xid();
- server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
-
- rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
- if (rc) {
- cifs_dbg(VFS, "No writable handle in writepages rc=%d\n", rc);
- goto err_xid;
- }
-
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
- &wsize, credits);
- if (rc != 0)
- goto err_close;
-
- wdata = cifs_writedata_alloc(cifs_writev_complete);
- if (!wdata) {
- rc = -ENOMEM;
- goto err_uncredit;
- }
-
- wdata->sync_mode = wbc->sync_mode;
- wdata->offset = folio_pos(folio);
- wdata->pid = cfile->pid;
- wdata->credits = credits_on_stack;
- wdata->cfile = cfile;
- wdata->server = server;
- cfile = NULL;
-
- /* Find all consecutive lockable dirty pages that have contiguous
- * written regions, stopping when we find a page that is not
- * immediately lockable, is not dirty or is missing, or we reach the
- * end of the range.
- */
- if (start < i_size) {
- /* Trim the write to the EOF; the extra data is ignored. Also
- * put an upper limit on the size of a single storedata op.
- */
- max_len = wsize;
- max_len = min_t(unsigned long long, max_len, end - start + 1);
- max_len = min_t(unsigned long long, max_len, i_size - start);
-
- if (len < max_len) {
- int max_pages = INT_MAX;
-
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (server->smbd_conn)
- max_pages = server->smbd_conn->max_frmr_depth;
-#endif
- max_pages -= folio_nr_pages(folio);
-
- if (max_pages > 0)
- cifs_extend_writeback(mapping, xas, &count, start,
- max_pages, max_len, &len);
- }
- }
- len = min_t(unsigned long long, len, i_size - start);
-
- /* We now have a contiguous set of dirty pages, each with writeback
- * set; the first page is still locked at this point, but all the rest
- * have been unlocked.
- */
- folio_unlock(folio);
- wdata->bytes = len;
-
- if (start < i_size) {
- iov_iter_xarray(&wdata->iter, ITER_SOURCE, &mapping->i_pages,
- start, len);
-
- rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
- if (rc)
- goto err_wdata;
-
- if (wdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = wdata->server->ops->async_writev(wdata,
- cifs_writedata_release);
- if (rc >= 0) {
- kref_put(&wdata->refcount, cifs_writedata_release);
- goto err_close;
- }
- } else {
- /* The dirty region was entirely beyond the EOF. */
- cifs_pages_written_back(inode, start, len);
- rc = 0;
- }
-
-err_wdata:
- kref_put(&wdata->refcount, cifs_writedata_release);
-err_uncredit:
- add_credits_and_wake_if(server, credits, 0);
-err_close:
- if (cfile)
- cifsFileInfo_put(cfile);
-err_xid:
- free_xid(xid);
- if (rc == 0) {
- wbc->nr_to_write = count;
- rc = len;
- } else if (is_retryable_error(rc)) {
- cifs_pages_write_redirty(inode, start, len);
- } else {
- cifs_pages_write_failed(inode, start, len);
- mapping_set_error(mapping, rc);
- }
- /* Indication to update ctime and mtime as close is deferred */
- set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
- return rc;
-}
-
-/*
- * write a region of pages back to the server
- */
-static ssize_t cifs_writepages_begin(struct address_space *mapping,
- struct writeback_control *wbc,
- struct xa_state *xas,
- unsigned long long *_start,
- unsigned long long end)
-{
- struct folio *folio;
- unsigned long long start = *_start;
- ssize_t ret;
- int skips = 0;
-
-search_again:
- /* Find the first dirty page. */
- rcu_read_lock();
-
- for (;;) {
- folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
- if (xas_retry(xas, folio) || xa_is_value(folio))
- continue;
- if (!folio)
- break;
-
- if (!folio_try_get_rcu(folio)) {
- xas_reset(xas);
- continue;
- }
-
- if (unlikely(folio != xas_reload(xas))) {
- folio_put(folio);
- xas_reset(xas);
- continue;
- }
-
- xas_pause(xas);
- break;
- }
- rcu_read_unlock();
- if (!folio)
- return 0;
-
- start = folio_pos(folio); /* May regress with THPs */
-
- /* At this point we hold neither the i_pages lock nor the page lock:
- * the page may be truncated or invalidated (changing page->mapping to
- * NULL), or even swizzled back from swapper_space to tmpfs file
- * mapping
- */
-lock_again:
- if (wbc->sync_mode != WB_SYNC_NONE) {
- ret = folio_lock_killable(folio);
- if (ret < 0)
- return ret;
- } else {
- if (!folio_trylock(folio))
- goto search_again;
- }
-
- if (folio->mapping != mapping ||
- !folio_test_dirty(folio)) {
- start += folio_size(folio);
- folio_unlock(folio);
- goto search_again;
- }
-
- if (folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
- folio_unlock(folio);
- if (wbc->sync_mode != WB_SYNC_NONE) {
- folio_wait_writeback(folio);
-#ifdef CONFIG_CIFS_FSCACHE
- folio_wait_fscache(folio);
-#endif
- goto lock_again;
- }
-
- start += folio_size(folio);
- if (wbc->sync_mode == WB_SYNC_NONE) {
- if (skips >= 5 || need_resched()) {
- ret = 0;
- goto out;
- }
- skips++;
- }
- goto search_again;
- }
-
- ret = cifs_write_back_from_locked_folio(mapping, wbc, xas, folio, start, end);
-out:
- if (ret > 0)
- *_start = start + ret;
- return ret;
-}
-
/*
- * Write a region of pages back to the server
+ * Flush data on a strict file.
*/
-static int cifs_writepages_region(struct address_space *mapping,
- struct writeback_control *wbc,
- unsigned long long *_start,
- unsigned long long end)
-{
- ssize_t ret;
-
- XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
-
- do {
- ret = cifs_writepages_begin(mapping, wbc, &xas, _start, end);
- if (ret > 0 && wbc->nr_to_write > 0)
- cond_resched();
- } while (ret > 0 && wbc->nr_to_write > 0);
-
- return ret > 0 ? 0 : ret;
-}
-
-/*
- * Write some of the pending data back to the server
- */
-static int cifs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- loff_t start, end;
- int ret;
-
- /* We have to be careful as we can end up racing with setattr()
- * truncating the pagecache since the caller doesn't take a lock here
- * to prevent it.
- */
-
- if (wbc->range_cyclic && mapping->writeback_index) {
- start = mapping->writeback_index * PAGE_SIZE;
- ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
- if (ret < 0)
- goto out;
-
- if (wbc->nr_to_write <= 0) {
- mapping->writeback_index = start / PAGE_SIZE;
- goto out;
- }
-
- start = 0;
- end = mapping->writeback_index * PAGE_SIZE;
- mapping->writeback_index = 0;
- ret = cifs_writepages_region(mapping, wbc, &start, end);
- if (ret == 0)
- mapping->writeback_index = start / PAGE_SIZE;
- } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
- start = 0;
- ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
- if (wbc->nr_to_write > 0 && ret == 0)
- mapping->writeback_index = start / PAGE_SIZE;
- } else {
- start = wbc->range_start;
- ret = cifs_writepages_region(mapping, wbc, &start, wbc->range_end);
- }
-
-out:
- return ret;
-}
-
-static int
-cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
-{
- int rc;
- unsigned int xid;
-
- xid = get_xid();
-/* BB add check for wbc flags */
- get_page(page);
- if (!PageUptodate(page))
- cifs_dbg(FYI, "ppw - page not up to date\n");
-
- /*
- * Set the "writeback" flag, and clear "dirty" in the radix tree.
- *
- * A writepage() implementation always needs to do either this,
- * or re-dirty the page with "redirty_page_for_writepage()" in
- * the case of a failure.
- *
- * Just unlocking the page will cause the radix tree tag-bits
- * to fail to update with the state of the page correctly.
- */
- set_page_writeback(page);
-retry_write:
- rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
- if (is_retryable_error(rc)) {
- if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
- goto retry_write;
- redirty_page_for_writepage(wbc, page);
- } else if (rc != 0) {
- SetPageError(page);
- mapping_set_error(page->mapping, rc);
- } else {
- SetPageUptodate(page);
- }
- end_page_writeback(page);
- put_page(page);
- free_xid(xid);
- return rc;
-}
-
-static int cifs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- int rc;
- struct inode *inode = mapping->host;
- struct cifsFileInfo *cfile = file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
- struct folio *folio = page_folio(page);
- __u32 pid;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = cfile->pid;
- else
- pid = current->tgid;
-
- cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
- page, pos, copied);
-
- if (folio_test_checked(folio)) {
- if (copied == len)
- folio_mark_uptodate(folio);
- folio_clear_checked(folio);
- } else if (!folio_test_uptodate(folio) && copied == PAGE_SIZE)
- folio_mark_uptodate(folio);
-
- if (!folio_test_uptodate(folio)) {
- char *page_data;
- unsigned offset = pos & (PAGE_SIZE - 1);
- unsigned int xid;
-
- xid = get_xid();
- /* this is probably better than directly calling
- partialpage_write since in this function the file handle is
- known which we might as well leverage */
- /* BB check if anything else missing out of ppw
- such as updating last write time */
- page_data = kmap(page);
- rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
- /* if (rc < 0) should we set writebehind rc? */
- kunmap(page);
-
- free_xid(xid);
- } else {
- rc = copied;
- pos += copied;
- set_page_dirty(page);
- }
-
- if (rc > 0) {
- spin_lock(&inode->i_lock);
- if (pos > inode->i_size) {
- loff_t additional_blocks = (512 - 1 + copied) >> 9;
-
- i_size_write(inode, pos);
- /*
- * Estimate new allocation size based on the amount written.
- * This will be updated from server on close (and on queryinfo)
- */
- inode->i_blocks = min_t(blkcnt_t, (512 - 1 + pos) >> 9,
- inode->i_blocks + additional_blocks);
- }
- spin_unlock(&inode->i_lock);
- }
-
- unlock_page(page);
- put_page(page);
- /* Indication to update ctime and mtime as close is deferred */
- set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
-
- return rc;
-}
-
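The (512 - 1 + copied) >> 9 expression in the removed write_end rounds the number of bytes written up to 512-byte blocks for the i_blocks estimate. A standalone check of that arithmetic:

#include <stdio.h>

/* Round a byte count up to 512-byte blocks, as the i_blocks estimate
 * above does with (512 - 1 + copied) >> 9. */
static unsigned long long blocks_for(unsigned long long bytes)
{
    return (512 - 1 + bytes) >> 9;
}

int main(void)
{
    printf("%llu\n", blocks_for(1));    /* 1 */
    printf("%llu\n", blocks_for(512));  /* 1 */
    printf("%llu\n", blocks_for(513));  /* 2 */
    return 0;
}
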
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
@@ -3215,6 +2674,9 @@ strict_fsync_exit:
return rc;
}
+/*
+ * Flush data on a non-strict file.
+ */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
unsigned int xid;
@@ -3281,481 +2743,6 @@ int cifs_flush(struct file *file, fl_owner_t id)
return rc;
}
-static void
-cifs_uncached_writedata_release(struct kref *refcount)
-{
- struct cifs_writedata *wdata = container_of(refcount,
- struct cifs_writedata, refcount);
-
- kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
- cifs_writedata_release(refcount);
-}
-
-static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
-
-static void
-cifs_uncached_writev_complete(struct work_struct *work)
-{
- struct cifs_writedata *wdata = container_of(work,
- struct cifs_writedata, work);
- struct inode *inode = d_inode(wdata->cfile->dentry);
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
-
- spin_lock(&inode->i_lock);
- cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
- if (cifsi->netfs.remote_i_size > inode->i_size)
- i_size_write(inode, cifsi->netfs.remote_i_size);
- spin_unlock(&inode->i_lock);
-
- complete(&wdata->done);
- collect_uncached_write_data(wdata->ctx);
- /* the below call can possibly free the last ref to aio ctx */
- kref_put(&wdata->refcount, cifs_uncached_writedata_release);
-}
-
-static int
-cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
- struct cifs_aio_ctx *ctx)
-{
- unsigned int wsize;
- struct cifs_credits credits;
- int rc;
- struct TCP_Server_Info *server = wdata->server;
-
- do {
- if (wdata->cfile->invalidHandle) {
- rc = cifs_reopen_file(wdata->cfile, false);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
-
- /*
- * Wait for credits to resend this wdata.
- * Note: we are attempting to resend the whole wdata not in
- * segments
- */
- do {
- rc = server->ops->wait_mtu_credits(server, wdata->bytes,
- &wsize, &credits);
- if (rc)
- goto fail;
-
- if (wsize < wdata->bytes) {
- add_credits_and_wake_if(server, &credits, 0);
- msleep(1000);
- }
- } while (wsize < wdata->bytes);
- wdata->credits = credits;
-
- rc = adjust_credits(server, &wdata->credits, wdata->bytes);
-
- if (!rc) {
- if (wdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else {
- wdata->replay = true;
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (wdata->mr) {
- wdata->mr->need_invalidate = true;
- smbd_deregister_mr(wdata->mr);
- wdata->mr = NULL;
- }
-#endif
- rc = server->ops->async_writev(wdata,
- cifs_uncached_writedata_release);
- }
- }
-
- /* If the write was successfully sent, we are done */
- if (!rc) {
- list_add_tail(&wdata->list, wdata_list);
- return 0;
- }
-
- /* Roll back credits and retry if needed */
- add_credits_and_wake_if(server, &wdata->credits, 0);
- } while (rc == -EAGAIN);
-
-fail:
- kref_put(&wdata->refcount, cifs_uncached_writedata_release);
- return rc;
-}
-
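The inner loop of the removed resend path keeps asking for credits until the grant covers the whole wdata, returning partial grants and sleeping between attempts. A toy model of that loop, with a stub server whose grant grows on each call (all names hypothetical):

#include <stdio.h>

static unsigned int grant = 1;

/* Stub "server": the available grant doubles on every call. */
static unsigned int wait_credits(unsigned int want)
{
    unsigned int got = grant < want ? grant : want;

    grant *= 2;
    return got;
}

int main(void)
{
    unsigned int need = 8, got;

    do {
        got = wait_credits(need);
        if (got < need)
            printf("got %u < %u: return credits, sleep, retry\n",
                   got, need);
    } while (got < need);
    printf("resending with %u credits\n", got);
    return 0;
}
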
-/*
- * Select span of a bvec iterator we're going to use. Limit it by both maximum
- * size and maximum number of segments.
- */
-static size_t cifs_limit_bvec_subset(const struct iov_iter *iter, size_t max_size,
- size_t max_segs, unsigned int *_nsegs)
-{
- const struct bio_vec *bvecs = iter->bvec;
- unsigned int nbv = iter->nr_segs, ix = 0, nsegs = 0;
- size_t len, span = 0, n = iter->count;
- size_t skip = iter->iov_offset;
-
- if (WARN_ON(!iov_iter_is_bvec(iter)) || n == 0)
- return 0;
-
- while (n && ix < nbv && skip) {
- len = bvecs[ix].bv_len;
- if (skip < len)
- break;
- skip -= len;
- n -= len;
- ix++;
- }
-
- while (n && ix < nbv) {
- len = min3(n, bvecs[ix].bv_len - skip, max_size);
- span += len;
- max_size -= len;
- nsegs++;
- ix++;
- if (max_size == 0 || nsegs >= max_segs)
- break;
- skip = 0;
- n -= len;
- }
-
- *_nsegs = nsegs;
- return span;
-}
-
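Concretely: with segments of 4 KiB, 2 KiB and 8 KiB, a 10 KiB size cap and a two-segment cap, the selected span is 6 KiB over two segments — whichever budget runs out first wins. A standalone version of the same walk (hypothetical names, plain structs standing in for bio_vecs; the initial skip-offset handling is omitted):

#include <stddef.h>
#include <stdio.h>

struct seg { size_t len; };

/* Accumulate at most max_size bytes over at most max_segs segments —
 * the same dual budget as the function above. */
static size_t limit_subset(const struct seg *segs, unsigned int nsegs,
                           size_t max_size, unsigned int max_segs,
                           unsigned int *used)
{
    size_t span = 0;
    unsigned int i = 0;

    while (i < nsegs && max_segs--) {
        size_t take = segs[i].len < max_size ? segs[i].len : max_size;

        span += take;
        max_size -= take;
        i++;
        if (!max_size)
            break;
    }
    *used = i;
    return span;
}

int main(void)
{
    struct seg segs[] = { { 4096 }, { 2048 }, { 8192 } };
    unsigned int used;
    size_t span = limit_subset(segs, 3, 10240, 2, &used);

    printf("span=%zu over %u segs\n", span, used); /* span=6144 over 2 segs */
    return 0;
}
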
-static int
-cifs_write_from_iter(loff_t fpos, size_t len, struct iov_iter *from,
- struct cifsFileInfo *open_file,
- struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
- struct cifs_aio_ctx *ctx)
-{
- int rc = 0;
- size_t cur_len, max_len;
- struct cifs_writedata *wdata;
- pid_t pid;
- struct TCP_Server_Info *server;
- unsigned int xid, max_segs = INT_MAX;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = open_file->pid;
- else
- pid = current->tgid;
-
- server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
- xid = get_xid();
-
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (server->smbd_conn)
- max_segs = server->smbd_conn->max_frmr_depth;
-#endif
-
- do {
- struct cifs_credits credits_on_stack;
- struct cifs_credits *credits = &credits_on_stack;
- unsigned int wsize, nsegs = 0;
-
- if (signal_pending(current)) {
- rc = -EINTR;
- break;
- }
-
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, false);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
- &wsize, credits);
- if (rc)
- break;
-
- max_len = min_t(const size_t, len, wsize);
- if (!max_len) {
- rc = -EAGAIN;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- cur_len = cifs_limit_bvec_subset(from, max_len, max_segs, &nsegs);
- cifs_dbg(FYI, "write_from_iter len=%zx/%zx nsegs=%u/%lu/%u\n",
- cur_len, max_len, nsegs, from->nr_segs, max_segs);
- if (cur_len == 0) {
- rc = -EIO;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- wdata = cifs_writedata_alloc(cifs_uncached_writev_complete);
- if (!wdata) {
- rc = -ENOMEM;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- wdata->sync_mode = WB_SYNC_ALL;
- wdata->offset = (__u64)fpos;
- wdata->cfile = cifsFileInfo_get(open_file);
- wdata->server = server;
- wdata->pid = pid;
- wdata->bytes = cur_len;
- wdata->credits = credits_on_stack;
- wdata->iter = *from;
- wdata->ctx = ctx;
- kref_get(&ctx->refcount);
-
- iov_iter_truncate(&wdata->iter, cur_len);
-
- rc = adjust_credits(server, &wdata->credits, wdata->bytes);
-
- if (!rc) {
- if (wdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = server->ops->async_writev(wdata,
- cifs_uncached_writedata_release);
- }
-
- if (rc) {
- add_credits_and_wake_if(server, &wdata->credits, 0);
- kref_put(&wdata->refcount,
- cifs_uncached_writedata_release);
- if (rc == -EAGAIN)
- continue;
- break;
- }
-
- list_add_tail(&wdata->list, wdata_list);
- iov_iter_advance(from, cur_len);
- fpos += cur_len;
- len -= cur_len;
- } while (len > 0);
-
- free_xid(xid);
- return rc;
-}
-
-static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
-{
- struct cifs_writedata *wdata, *tmp;
- struct cifs_tcon *tcon;
- struct cifs_sb_info *cifs_sb;
- struct dentry *dentry = ctx->cfile->dentry;
- ssize_t rc;
-
- tcon = tlink_tcon(ctx->cfile->tlink);
- cifs_sb = CIFS_SB(dentry->d_sb);
-
- mutex_lock(&ctx->aio_mutex);
-
- if (list_empty(&ctx->list)) {
- mutex_unlock(&ctx->aio_mutex);
- return;
- }
-
- rc = ctx->rc;
- /*
- * Wait for and collect replies for any successful sends in order of
- * increasing offset. Once an error is hit, then return without waiting
- * for any more replies.
- */
-restart_loop:
- list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
- if (!rc) {
- if (!try_wait_for_completion(&wdata->done)) {
- mutex_unlock(&ctx->aio_mutex);
- return;
- }
-
- if (wdata->result)
- rc = wdata->result;
- else
- ctx->total_len += wdata->bytes;
-
- /* resend call if it's a retryable error */
- if (rc == -EAGAIN) {
- struct list_head tmp_list;
- struct iov_iter tmp_from = ctx->iter;
-
- INIT_LIST_HEAD(&tmp_list);
- list_del_init(&wdata->list);
-
- if (ctx->direct_io)
- rc = cifs_resend_wdata(
- wdata, &tmp_list, ctx);
- else {
- iov_iter_advance(&tmp_from,
- wdata->offset - ctx->pos);
-
- rc = cifs_write_from_iter(wdata->offset,
- wdata->bytes, &tmp_from,
- ctx->cfile, cifs_sb, &tmp_list,
- ctx);
-
- kref_put(&wdata->refcount,
- cifs_uncached_writedata_release);
- }
-
- list_splice(&tmp_list, &ctx->list);
- goto restart_loop;
- }
- }
- list_del_init(&wdata->list);
- kref_put(&wdata->refcount, cifs_uncached_writedata_release);
- }
-
- cifs_stats_bytes_written(tcon, ctx->total_len);
- set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
-
- ctx->rc = (rc == 0) ? ctx->total_len : rc;
-
- mutex_unlock(&ctx->aio_mutex);
-
- if (ctx->iocb && ctx->iocb->ki_complete)
- ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
- else
- complete(&ctx->done);
-}
-
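The collection rule in the removed function above: walk replies in offset order, accumulating byte counts until the first error, after which later entries are only freed, not counted (the -EAGAIN resend branch is omitted here). A standalone trace of that rule:

#include <stdio.h>

int main(void)
{
    int results[] = { 4096, 4096, -11 /* -EAGAIN */, 4096 };
    long total = 0;
    int rc = 0;

    for (unsigned int i = 0; i < sizeof(results) / sizeof(results[0]); i++) {
        if (rc)
            continue;        /* drop, don't count */
        if (results[i] < 0)
            rc = results[i];
        else
            total += results[i];
    }
    printf("rc=%d total=%ld\n", rc, total); /* rc=-11 total=8192 */
    return 0;
}
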
-static ssize_t __cifs_writev(
- struct kiocb *iocb, struct iov_iter *from, bool direct)
-{
- struct file *file = iocb->ki_filp;
- ssize_t total_written = 0;
- struct cifsFileInfo *cfile;
- struct cifs_tcon *tcon;
- struct cifs_sb_info *cifs_sb;
- struct cifs_aio_ctx *ctx;
- int rc;
-
- rc = generic_write_checks(iocb, from);
- if (rc <= 0)
- return rc;
-
- cifs_sb = CIFS_FILE_SB(file);
- cfile = file->private_data;
- tcon = tlink_tcon(cfile->tlink);
-
- if (!tcon->ses->server->ops->async_writev)
- return -ENOSYS;
-
- ctx = cifs_aio_ctx_alloc();
- if (!ctx)
- return -ENOMEM;
-
- ctx->cfile = cifsFileInfo_get(cfile);
-
- if (!is_sync_kiocb(iocb))
- ctx->iocb = iocb;
-
- ctx->pos = iocb->ki_pos;
- ctx->direct_io = direct;
- ctx->nr_pinned_pages = 0;
-
- if (user_backed_iter(from)) {
- /*
- * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as
- * they contain references to the calling process's virtual
- * memory layout which won't be available in an async worker
- * thread. This also takes a pin on every folio involved.
- */
- rc = netfs_extract_user_iter(from, iov_iter_count(from),
- &ctx->iter, 0);
- if (rc < 0) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
- }
-
- ctx->nr_pinned_pages = rc;
- ctx->bv = (void *)ctx->iter.bvec;
- ctx->bv_need_unpin = iov_iter_extract_will_pin(from);
- } else if ((iov_iter_is_bvec(from) || iov_iter_is_kvec(from)) &&
- !is_sync_kiocb(iocb)) {
- /*
- * If the op is asynchronous, we need to copy the list attached
- * to a BVEC/KVEC-type iterator, but we assume that the storage
- * will be pinned by the caller; in any case, we may or may not
- * be able to pin the pages, so we don't try.
- */
- ctx->bv = (void *)dup_iter(&ctx->iter, from, GFP_KERNEL);
- if (!ctx->bv) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return -ENOMEM;
- }
- } else {
- /*
- * Otherwise, we just pass the iterator down as-is and rely on
- * the caller to make sure the pages referred to by the
- * iterator don't evaporate.
- */
- ctx->iter = *from;
- }
-
- ctx->len = iov_iter_count(&ctx->iter);
-
-	/* grab a lock here because read response handlers can access ctx */
- mutex_lock(&ctx->aio_mutex);
-
- rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &ctx->iter,
- cfile, cifs_sb, &ctx->list, ctx);
-
- /*
- * If at least one write was successfully sent, then discard any rc
- * value from the later writes. If the other write succeeds, then
- * we'll end up returning whatever was written. If it fails, then
- * we'll get a new rc value from that.
- */
- if (!list_empty(&ctx->list))
- rc = 0;
-
- mutex_unlock(&ctx->aio_mutex);
-
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
- }
-
- if (!is_sync_kiocb(iocb)) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return -EIOCBQUEUED;
- }
-
- rc = wait_for_completion_killable(&ctx->done);
- if (rc) {
- mutex_lock(&ctx->aio_mutex);
- ctx->rc = rc = -EINTR;
- total_written = ctx->total_len;
- mutex_unlock(&ctx->aio_mutex);
- } else {
- rc = ctx->rc;
- total_written = ctx->total_len;
- }
-
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
-
- if (unlikely(!total_written))
- return rc;
-
- iocb->ki_pos += total_written;
- return total_written;
-}
-
-ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *file = iocb->ki_filp;
-
- cifs_revalidate_mapping(file->f_inode);
- return __cifs_writev(iocb, from, true);
-}
-
-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
-{
- return __cifs_writev(iocb, from, false);
-}
-
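Both the removed __cifs_writev() and its netfs replacement share one requirement for async I/O: the request must own a stable copy of the caller's segment list, because an IOVEC on the submitter's stack is gone once the syscall returns; pinning the data pages themselves is the separate job of netfs_extract_user_iter(). A userspace sketch of that ownership copy (hypothetical names):

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

struct write_ctx {
    struct iovec *iov;  /* owned copy of the submitter's segments */
    int iovcnt;
};

/* Duplicate the segment array before queueing async work, so the
 * worker never dereferences the submitter's stack. */
static struct write_ctx *ctx_alloc(const struct iovec *iov, int iovcnt)
{
    struct write_ctx *ctx = calloc(1, sizeof(*ctx));

    if (!ctx)
        return NULL;
    ctx->iov = malloc(sizeof(*iov) * iovcnt);
    if (!ctx->iov) {
        free(ctx);
        return NULL;
    }
    memcpy(ctx->iov, iov, sizeof(*iov) * iovcnt);
    ctx->iovcnt = iovcnt;
    return ctx;
}

static void ctx_free(struct write_ctx *ctx)
{
    if (ctx) {
        free(ctx->iov);
        free(ctx);
    }
}
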
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
@@ -3766,7 +2753,10 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
ssize_t rc;
- inode_lock(inode);
+ rc = netfs_start_io_write(inode);
+ if (rc < 0)
+ return rc;
+
/*
* We need to hold the sem to be sure nobody modifies lock list
* with a brlock that prevents writing.
@@ -3780,13 +2770,12 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
server->vals->exclusive_lock_type, 0,
NULL, CIFS_WRITE_OP))
- rc = __generic_file_write_iter(iocb, from);
+ rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
else
rc = -EACCES;
out:
up_read(&cinode->lock_sem);
- inode_unlock(inode);
-
+ netfs_end_io_write(inode);
if (rc > 0)
rc = generic_write_sync(iocb, rc);
return rc;
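Condensed shape of the hunk above: netfs_start_io_write() replaces the bare inode_lock() so that buffered writes also exclude in-flight unbuffered/direct I/O. A trimmed sketch, kernel context assumed, with the brlock-conflict check and write-sync omitted:

/* Sketch (kernel context assumed): the locking bracket this hunk
 * introduces. netfs_start_io_write() takes the inode write lock and
 * waits out direct I/O; netfs_end_io_write() releases it. */
static ssize_t locked_buffered_write(struct kiocb *iocb,
                                     struct iov_iter *from,
                                     struct inode *inode)
{
    ssize_t rc = netfs_start_io_write(inode);

    if (rc < 0)
        return rc;
    rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
    netfs_end_io_write(inode);
    return rc;
}
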
@@ -3809,9 +2798,9 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
if (CIFS_CACHE_WRITE(cinode)) {
if (cap_unix(tcon->ses) &&
- (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
- && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
- written = generic_file_write_iter(iocb, from);
+ (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
+ ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
+ written = netfs_file_write_iter(iocb, from);
goto out;
}
written = cifs_writev(iocb, from);
@@ -3823,7 +2812,7 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
 * affected pages because it may cause an error with mandatory locks on
* these pages but not on the region from pos to ppos+len-1.
*/
- written = cifs_user_writev(iocb, from);
+ written = netfs_file_write_iter(iocb, from);
if (CIFS_CACHE_READ(cinode)) {
/*
* We have read level caching and we have just sent a write
@@ -3842,449 +2831,55 @@ out:
return written;
}
-static struct cifs_readdata *cifs_readdata_alloc(work_func_t complete)
-{
- struct cifs_readdata *rdata;
-
- rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
- if (rdata) {
- kref_init(&rdata->refcount);
- INIT_LIST_HEAD(&rdata->list);
- init_completion(&rdata->done);
- INIT_WORK(&rdata->work, complete);
- }
-
- return rdata;
-}
-
-void
-cifs_readdata_release(struct kref *refcount)
-{
- struct cifs_readdata *rdata = container_of(refcount,
- struct cifs_readdata, refcount);
-
- if (rdata->ctx)
- kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (rdata->mr) {
- smbd_deregister_mr(rdata->mr);
- rdata->mr = NULL;
- }
-#endif
- if (rdata->cfile)
- cifsFileInfo_put(rdata->cfile);
-
- kfree(rdata);
-}
-
-static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
-
-static void
-cifs_uncached_readv_complete(struct work_struct *work)
-{
- struct cifs_readdata *rdata = container_of(work,
- struct cifs_readdata, work);
-
- complete(&rdata->done);
- collect_uncached_read_data(rdata->ctx);
- /* the below call can possibly free the last ref to aio ctx */
- kref_put(&rdata->refcount, cifs_readdata_release);
-}
-
-static int cifs_resend_rdata(struct cifs_readdata *rdata,
- struct list_head *rdata_list,
- struct cifs_aio_ctx *ctx)
-{
- unsigned int rsize;
- struct cifs_credits credits;
- int rc;
- struct TCP_Server_Info *server;
-
- /* XXX: should we pick a new channel here? */
- server = rdata->server;
-
- do {
- if (rdata->cfile->invalidHandle) {
- rc = cifs_reopen_file(rdata->cfile, true);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
- /*
- * Wait for credits to resend this rdata.
- * Note: we are attempting to resend the whole rdata not in
- * segments
- */
- do {
- rc = server->ops->wait_mtu_credits(server, rdata->bytes,
- &rsize, &credits);
-
- if (rc)
- goto fail;
-
- if (rsize < rdata->bytes) {
- add_credits_and_wake_if(server, &credits, 0);
- msleep(1000);
- }
- } while (rsize < rdata->bytes);
- rdata->credits = credits;
-
- rc = adjust_credits(server, &rdata->credits, rdata->bytes);
- if (!rc) {
- if (rdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else {
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (rdata->mr) {
- rdata->mr->need_invalidate = true;
- smbd_deregister_mr(rdata->mr);
- rdata->mr = NULL;
- }
-#endif
- rc = server->ops->async_readv(rdata);
- }
- }
-
- /* If the read was successfully sent, we are done */
- if (!rc) {
- /* Add to aio pending list */
- list_add_tail(&rdata->list, rdata_list);
- return 0;
- }
-
- /* Roll back credits and retry if needed */
- add_credits_and_wake_if(server, &rdata->credits, 0);
- } while (rc == -EAGAIN);
-
-fail:
- kref_put(&rdata->refcount, cifs_readdata_release);
- return rc;
-}
-
-static int
-cifs_send_async_read(loff_t fpos, size_t len, struct cifsFileInfo *open_file,
- struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
- struct cifs_aio_ctx *ctx)
+ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
- struct cifs_readdata *rdata;
- unsigned int rsize, nsegs, max_segs = INT_MAX;
- struct cifs_credits credits_on_stack;
- struct cifs_credits *credits = &credits_on_stack;
- size_t cur_len, max_len;
- int rc;
- pid_t pid;
- struct TCP_Server_Info *server;
-
- server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
-
-#ifdef CONFIG_CIFS_SMB_DIRECT
- if (server->smbd_conn)
- max_segs = server->smbd_conn->max_frmr_depth;
-#endif
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = open_file->pid;
- else
- pid = current->tgid;
-
- do {
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
- break;
- }
-
- if (cifs_sb->ctx->rsize == 0)
- cifs_sb->ctx->rsize =
- server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
- cifs_sb->ctx);
-
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
- &rsize, credits);
- if (rc)
- break;
-
- max_len = min_t(size_t, len, rsize);
-
- cur_len = cifs_limit_bvec_subset(&ctx->iter, max_len,
- max_segs, &nsegs);
- cifs_dbg(FYI, "read-to-iter len=%zx/%zx nsegs=%u/%lu/%u\n",
- cur_len, max_len, nsegs, ctx->iter.nr_segs, max_segs);
- if (cur_len == 0) {
- rc = -EIO;
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- rdata = cifs_readdata_alloc(cifs_uncached_readv_complete);
- if (!rdata) {
- add_credits_and_wake_if(server, credits, 0);
- rc = -ENOMEM;
- break;
- }
-
- rdata->server = server;
- rdata->cfile = cifsFileInfo_get(open_file);
- rdata->offset = fpos;
- rdata->bytes = cur_len;
- rdata->pid = pid;
- rdata->credits = credits_on_stack;
- rdata->ctx = ctx;
- kref_get(&ctx->refcount);
-
- rdata->iter = ctx->iter;
- iov_iter_truncate(&rdata->iter, cur_len);
+ ssize_t rc;
+ struct inode *inode = file_inode(iocb->ki_filp);
- rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return netfs_unbuffered_read_iter(iocb, iter);
- if (!rc) {
- if (rdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = server->ops->async_readv(rdata);
- }
-
- if (rc) {
- add_credits_and_wake_if(server, &rdata->credits, 0);
- kref_put(&rdata->refcount, cifs_readdata_release);
- if (rc == -EAGAIN)
- continue;
- break;
- }
-
- list_add_tail(&rdata->list, rdata_list);
- iov_iter_advance(&ctx->iter, cur_len);
- fpos += cur_len;
- len -= cur_len;
- } while (len > 0);
+ rc = cifs_revalidate_mapping(inode);
+ if (rc)
+ return rc;
- return rc;
+ return netfs_file_read_iter(iocb, iter);
}
-static void
-collect_uncached_read_data(struct cifs_aio_ctx *ctx)
+ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct cifs_readdata *rdata, *tmp;
- struct cifs_sb_info *cifs_sb;
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+ ssize_t written;
int rc;
- cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
-
- mutex_lock(&ctx->aio_mutex);
-
- if (list_empty(&ctx->list)) {
- mutex_unlock(&ctx->aio_mutex);
- return;
- }
-
- rc = ctx->rc;
- /* the loop below should proceed in the order of increasing offsets */
-again:
- list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
- if (!rc) {
- if (!try_wait_for_completion(&rdata->done)) {
- mutex_unlock(&ctx->aio_mutex);
- return;
- }
-
- if (rdata->result == -EAGAIN) {
- /* resend call if it's a retryable error */
- struct list_head tmp_list;
- unsigned int got_bytes = rdata->got_bytes;
-
- list_del_init(&rdata->list);
- INIT_LIST_HEAD(&tmp_list);
-
- if (ctx->direct_io) {
- /*
- * Re-use rdata as this is a
- * direct I/O
- */
- rc = cifs_resend_rdata(
- rdata,
- &tmp_list, ctx);
- } else {
- rc = cifs_send_async_read(
- rdata->offset + got_bytes,
- rdata->bytes - got_bytes,
- rdata->cfile, cifs_sb,
- &tmp_list, ctx);
-
- kref_put(&rdata->refcount,
- cifs_readdata_release);
- }
-
- list_splice(&tmp_list, &ctx->list);
-
- goto again;
- } else if (rdata->result)
- rc = rdata->result;
-
- /* if there was a short read -- discard anything left */
- if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
- rc = -ENODATA;
-
- ctx->total_len += rdata->got_bytes;
- }
- list_del_init(&rdata->list);
- kref_put(&rdata->refcount, cifs_readdata_release);
- }
-
- /* mask nodata case */
- if (rc == -ENODATA)
- rc = 0;
-
- ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
-
- mutex_unlock(&ctx->aio_mutex);
-
- if (ctx->iocb && ctx->iocb->ki_complete)
- ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
- else
- complete(&ctx->done);
-}
-
-static ssize_t __cifs_readv(
- struct kiocb *iocb, struct iov_iter *to, bool direct)
-{
- size_t len;
- struct file *file = iocb->ki_filp;
- struct cifs_sb_info *cifs_sb;
- struct cifsFileInfo *cfile;
- struct cifs_tcon *tcon;
- ssize_t rc, total_read = 0;
- loff_t offset = iocb->ki_pos;
- struct cifs_aio_ctx *ctx;
-
- len = iov_iter_count(to);
- if (!len)
- return 0;
-
- cifs_sb = CIFS_FILE_SB(file);
- cfile = file->private_data;
- tcon = tlink_tcon(cfile->tlink);
-
- if (!tcon->ses->server->ops->async_readv)
- return -ENOSYS;
-
- if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cifs_dbg(FYI, "attempting read on write only file instance\n");
-
- ctx = cifs_aio_ctx_alloc();
- if (!ctx)
- return -ENOMEM;
-
- ctx->pos = offset;
- ctx->direct_io = direct;
- ctx->len = len;
- ctx->cfile = cifsFileInfo_get(cfile);
- ctx->nr_pinned_pages = 0;
-
- if (!is_sync_kiocb(iocb))
- ctx->iocb = iocb;
-
- if (user_backed_iter(to)) {
- /*
- * Extract IOVEC/UBUF-type iterators to a BVEC-type iterator as
- * they contain references to the calling process's virtual
- * memory layout which won't be available in an async worker
- * thread. This also takes a pin on every folio involved.
- */
- rc = netfs_extract_user_iter(to, iov_iter_count(to),
- &ctx->iter, 0);
- if (rc < 0) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
- }
-
- ctx->nr_pinned_pages = rc;
- ctx->bv = (void *)ctx->iter.bvec;
- ctx->bv_need_unpin = iov_iter_extract_will_pin(to);
- ctx->should_dirty = true;
- } else if ((iov_iter_is_bvec(to) || iov_iter_is_kvec(to)) &&
- !is_sync_kiocb(iocb)) {
- /*
- * If the op is asynchronous, we need to copy the list attached
- * to a BVEC/KVEC-type iterator, but we assume that the storage
- * will be retained by the caller; in any case, we may or may
- * not be able to pin the pages, so we don't try.
- */
- ctx->bv = (void *)dup_iter(&ctx->iter, to, GFP_KERNEL);
- if (!ctx->bv) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return -ENOMEM;
- }
- } else {
- /*
- * Otherwise, we just pass the iterator down as-is and rely on
- * the caller to make sure the pages referred to by the
- * iterator don't evaporate.
- */
- ctx->iter = *to;
- }
-
- if (direct) {
- rc = filemap_write_and_wait_range(file->f_inode->i_mapping,
- offset, offset + len - 1);
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return -EAGAIN;
+ if (iocb->ki_filp->f_flags & O_DIRECT) {
+ written = netfs_unbuffered_write_iter(iocb, from);
+ if (written > 0 && CIFS_CACHE_READ(cinode)) {
+ cifs_zap_mapping(inode);
+ cifs_dbg(FYI,
+ "Set no oplock for inode=%p after a write operation\n",
+ inode);
+ cinode->oplock = 0;
}
+ return written;
}
-	/* grab a lock here because read response handlers can access ctx */
- mutex_lock(&ctx->aio_mutex);
-
- rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
-
- /* if at least one read request send succeeded, then reset rc */
- if (!list_empty(&ctx->list))
- rc = 0;
-
- mutex_unlock(&ctx->aio_mutex);
-
- if (rc) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return rc;
- }
-
- if (!is_sync_kiocb(iocb)) {
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
- return -EIOCBQUEUED;
- }
-
- rc = wait_for_completion_killable(&ctx->done);
- if (rc) {
- mutex_lock(&ctx->aio_mutex);
- ctx->rc = rc = -EINTR;
- total_read = ctx->total_len;
- mutex_unlock(&ctx->aio_mutex);
- } else {
- rc = ctx->rc;
- total_read = ctx->total_len;
- }
+ written = cifs_get_writer(cinode);
+ if (written)
+ return written;
- kref_put(&ctx->refcount, cifs_aio_ctx_release);
+ written = netfs_file_write_iter(iocb, from);
- if (total_read) {
- iocb->ki_pos += total_read;
- return total_read;
+ if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
+ rc = filemap_fdatawrite(inode->i_mapping);
+ if (rc)
+ cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
+ rc, inode);
}
- return rc;
-}
-
-ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
-{
- return __cifs_readv(iocb, to, true);
-}
-ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
-{
- return __cifs_readv(iocb, to, false);
+ cifs_put_writer(cinode);
+ return written;
}
ssize_t
@@ -4307,140 +2902,52 @@ cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
* pos+len-1.
*/
if (!CIFS_CACHE_READ(cinode))
- return cifs_user_readv(iocb, to);
+ return netfs_unbuffered_read_iter(iocb, to);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
- ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
- return generic_file_read_iter(iocb, to);
+ ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
+ if (iocb->ki_flags & IOCB_DIRECT)
+ return netfs_unbuffered_read_iter(iocb, to);
+ return netfs_buffered_read_iter(iocb, to);
+ }
/*
* We need to hold the sem to be sure nobody modifies lock list
* with a brlock that prevents reading.
*/
- down_read(&cinode->lock_sem);
- if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
- tcon->ses->server->vals->shared_lock_type,
- 0, NULL, CIFS_READ_OP))
- rc = generic_file_read_iter(iocb, to);
- up_read(&cinode->lock_sem);
- return rc;
-}
-
-static ssize_t
-cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
-{
- int rc = -EACCES;
- unsigned int bytes_read = 0;
- unsigned int total_read;
- unsigned int current_read_size;
- unsigned int rsize;
- struct cifs_sb_info *cifs_sb;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
- unsigned int xid;
- char *cur_offset;
- struct cifsFileInfo *open_file;
- struct cifs_io_parms io_parms = {0};
- int buf_type = CIFS_NO_BUFFER;
- __u32 pid;
-
- xid = get_xid();
- cifs_sb = CIFS_FILE_SB(file);
-
- /* FIXME: set up handlers for larger reads and/or convert to async */
- rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
-
- if (file->private_data == NULL) {
- rc = -EBADF;
- free_xid(xid);
- return rc;
- }
- open_file = file->private_data;
- tcon = tlink_tcon(open_file->tlink);
- server = cifs_pick_channel(tcon->ses);
-
- if (!server->ops->sync_read) {
- free_xid(xid);
- return -ENOSYS;
- }
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = open_file->pid;
- else
- pid = current->tgid;
-
- if ((file->f_flags & O_ACCMODE) == O_WRONLY)
- cifs_dbg(FYI, "attempting read on write only file instance\n");
-
- for (total_read = 0, cur_offset = read_data; read_size > total_read;
- total_read += bytes_read, cur_offset += bytes_read) {
- do {
- current_read_size = min_t(uint, read_size - total_read,
- rsize);
- /*
- * For windows me and 9x we do not want to request more
- * than it negotiated since it will refuse the read
- * then.
- */
- if (!(tcon->ses->capabilities &
- tcon->ses->server->vals->cap_large_files)) {
- current_read_size = min_t(uint,
- current_read_size, CIFSMaxBufSize);
- }
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc != 0)
- break;
- }
- io_parms.pid = pid;
- io_parms.tcon = tcon;
- io_parms.offset = *offset;
- io_parms.length = current_read_size;
- io_parms.server = server;
- rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
- &bytes_read, &cur_offset,
- &buf_type);
- } while (rc == -EAGAIN);
-
- if (rc || (bytes_read == 0)) {
- if (total_read) {
- break;
- } else {
- free_xid(xid);
- return rc;
- }
- } else {
- cifs_stats_bytes_read(tcon, total_read);
- *offset += bytes_read;
- }
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ rc = netfs_start_io_direct(inode);
+ if (rc < 0)
+ goto out;
+ down_read(&cinode->lock_sem);
+ if (!cifs_find_lock_conflict(
+ cfile, iocb->ki_pos, iov_iter_count(to),
+ tcon->ses->server->vals->shared_lock_type,
+ 0, NULL, CIFS_READ_OP))
+ rc = netfs_unbuffered_read_iter_locked(iocb, to);
+ up_read(&cinode->lock_sem);
+ netfs_end_io_direct(inode);
+ } else {
+ rc = netfs_start_io_read(inode);
+ if (rc < 0)
+ goto out;
+ down_read(&cinode->lock_sem);
+ if (!cifs_find_lock_conflict(
+ cfile, iocb->ki_pos, iov_iter_count(to),
+ tcon->ses->server->vals->shared_lock_type,
+ 0, NULL, CIFS_READ_OP))
+ rc = filemap_read(iocb, to, 0);
+ up_read(&cinode->lock_sem);
+ netfs_end_io_read(inode);
}
- free_xid(xid);
- return total_read;
+out:
+ return rc;
}
-/*
- * If the page is mmap'ed into a process' page tables, then we need to make
- * sure that it doesn't change while being written back.
- */
static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
{
- struct folio *folio = page_folio(vmf->page);
-
- /* Wait for the folio to be written to the cache before we allow it to
- * be modified. We then assume the entire folio will need writing back.
- */
-#ifdef CONFIG_CIFS_FSCACHE
- if (folio_test_fscache(folio) &&
- folio_wait_fscache_killable(folio) < 0)
- return VM_FAULT_RETRY;
-#endif
-
- folio_wait_writeback(folio);
-
- if (folio_lock_killable(folio) < 0)
- return VM_FAULT_RETRY;
- return VM_FAULT_LOCKED;
+ return netfs_page_mkwrite(vmf, NULL);
}
static const struct vm_operations_struct cifs_file_vm_ops = {
@@ -4486,290 +2993,6 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return rc;
}
-/*
- * Unlock a bunch of folios in the pagecache.
- */
-static void cifs_unlock_folios(struct address_space *mapping, pgoff_t first, pgoff_t last)
-{
- struct folio *folio;
- XA_STATE(xas, &mapping->i_pages, first);
-
- rcu_read_lock();
- xas_for_each(&xas, folio, last) {
- folio_unlock(folio);
- }
- rcu_read_unlock();
-}
-
-static void cifs_readahead_complete(struct work_struct *work)
-{
- struct cifs_readdata *rdata = container_of(work,
- struct cifs_readdata, work);
- struct folio *folio;
- pgoff_t last;
- bool good = rdata->result == 0 || (rdata->result == -EAGAIN && rdata->got_bytes);
-
- XA_STATE(xas, &rdata->mapping->i_pages, rdata->offset / PAGE_SIZE);
-
- if (good)
- cifs_readahead_to_fscache(rdata->mapping->host,
- rdata->offset, rdata->bytes);
-
- if (iov_iter_count(&rdata->iter) > 0)
- iov_iter_zero(iov_iter_count(&rdata->iter), &rdata->iter);
-
- last = (rdata->offset + rdata->bytes - 1) / PAGE_SIZE;
-
- rcu_read_lock();
- xas_for_each(&xas, folio, last) {
- if (good) {
- flush_dcache_folio(folio);
- folio_mark_uptodate(folio);
- }
- folio_unlock(folio);
- }
- rcu_read_unlock();
-
- kref_put(&rdata->refcount, cifs_readdata_release);
-}
-
-static void cifs_readahead(struct readahead_control *ractl)
-{
- struct cifsFileInfo *open_file = ractl->file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
- struct TCP_Server_Info *server;
- unsigned int xid, nr_pages, cache_nr_pages = 0;
- unsigned int ra_pages;
- pgoff_t next_cached = ULONG_MAX, ra_index;
- bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
- cifs_inode_cookie(ractl->mapping->host)->cache_priv;
- bool check_cache = caching;
- pid_t pid;
- int rc = 0;
-
- /* Note that readahead_count() lags behind our dequeuing of pages from
-	 * the ractl, so we have to keep track for ourselves.
- */
- ra_pages = readahead_count(ractl);
- ra_index = readahead_index(ractl);
-
- xid = get_xid();
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
- pid = open_file->pid;
- else
- pid = current->tgid;
-
- server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
-
- cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
- __func__, ractl->file, ractl->mapping, ra_pages);
-
- /*
- * Chop the readahead request up into rsize-sized read requests.
- */
- while ((nr_pages = ra_pages)) {
- unsigned int i, rsize;
- struct cifs_readdata *rdata;
- struct cifs_credits credits_on_stack;
- struct cifs_credits *credits = &credits_on_stack;
- struct folio *folio;
- pgoff_t fsize;
-
- /*
- * Find out if we have anything cached in the range of
- * interest, and if so, where the next chunk of cached data is.
- */
- if (caching) {
- if (check_cache) {
- rc = cifs_fscache_query_occupancy(
- ractl->mapping->host, ra_index, nr_pages,
- &next_cached, &cache_nr_pages);
- if (rc < 0)
- caching = false;
- check_cache = false;
- }
-
- if (ra_index == next_cached) {
- /*
- * TODO: Send a whole batch of pages to be read
- * by the cache.
- */
- folio = readahead_folio(ractl);
- fsize = folio_nr_pages(folio);
- ra_pages -= fsize;
- ra_index += fsize;
- if (cifs_readpage_from_fscache(ractl->mapping->host,
- &folio->page) < 0) {
- /*
- * TODO: Deal with cache read failure
- * here, but for the moment, delegate
- * that to readpage.
- */
- caching = false;
- }
- folio_unlock(folio);
- next_cached += fsize;
- cache_nr_pages -= fsize;
- if (cache_nr_pages == 0)
- check_cache = true;
- continue;
- }
- }
-
- if (open_file->invalidHandle) {
- rc = cifs_reopen_file(open_file, true);
- if (rc) {
- if (rc == -EAGAIN)
- continue;
- break;
- }
- }
-
- if (cifs_sb->ctx->rsize == 0)
- cifs_sb->ctx->rsize =
- server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
- cifs_sb->ctx);
-
- rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
- &rsize, credits);
- if (rc)
- break;
- nr_pages = min_t(size_t, rsize / PAGE_SIZE, ra_pages);
- if (next_cached != ULONG_MAX)
- nr_pages = min_t(size_t, nr_pages, next_cached - ra_index);
-
- /*
- * Give up immediately if rsize is too small to read an entire
- * page. The VFS will fall back to readpage. We should never
- * reach this point however since we set ra_pages to 0 when the
- * rsize is smaller than a cache page.
- */
- if (unlikely(!nr_pages)) {
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- rdata = cifs_readdata_alloc(cifs_readahead_complete);
- if (!rdata) {
- /* best to give up if we're out of mem */
- add_credits_and_wake_if(server, credits, 0);
- break;
- }
-
- rdata->offset = ra_index * PAGE_SIZE;
- rdata->bytes = nr_pages * PAGE_SIZE;
- rdata->cfile = cifsFileInfo_get(open_file);
- rdata->server = server;
- rdata->mapping = ractl->mapping;
- rdata->pid = pid;
- rdata->credits = credits_on_stack;
-
- for (i = 0; i < nr_pages; i++) {
- if (!readahead_folio(ractl))
- WARN_ON(1);
- }
- ra_pages -= nr_pages;
- ra_index += nr_pages;
-
- iov_iter_xarray(&rdata->iter, ITER_DEST, &rdata->mapping->i_pages,
- rdata->offset, rdata->bytes);
-
- rc = adjust_credits(server, &rdata->credits, rdata->bytes);
- if (!rc) {
- if (rdata->cfile->invalidHandle)
- rc = -EAGAIN;
- else
- rc = server->ops->async_readv(rdata);
- }
-
- if (rc) {
- add_credits_and_wake_if(server, &rdata->credits, 0);
- cifs_unlock_folios(rdata->mapping,
- rdata->offset / PAGE_SIZE,
- (rdata->offset + rdata->bytes - 1) / PAGE_SIZE);
- /* Fallback to the readpage in error/reconnect cases */
- kref_put(&rdata->refcount, cifs_readdata_release);
- break;
- }
-
- kref_put(&rdata->refcount, cifs_readdata_release);
- }
-
- free_xid(xid);
-}
-
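The slicing arithmetic in the removed readahead path, in isolation: each request takes min(rsize / PAGE_SIZE, remaining) pages, clipped so it never crosses the next cached extent, which is consumed separately. A standalone run of that slicing (all numbers assumed for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long ra_pages = 100, ra_index = 0;
    unsigned long rsize = 65536;        /* 16 pages per request */
    unsigned long next_cached = 40;     /* cache takes over here */

    while (ra_pages) {
        unsigned long nr = rsize / PAGE_SIZE;

        if (nr > ra_pages)
            nr = ra_pages;
        if (next_cached > ra_index && next_cached - ra_index < nr)
            nr = next_cached - ra_index; /* stop at cached data */
        printf("read pages [%lu..%lu)\n", ra_index, ra_index + nr);
        ra_index += nr;
        ra_pages -= nr;
        if (ra_index == next_cached) {   /* pretend one cached folio */
            ra_index++;
            ra_pages--;
            next_cached = (unsigned long)-1;
        }
    }
    return 0;
}
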
-/*
- * cifs_readpage_worker must be called with the page pinned
- */
-static int cifs_readpage_worker(struct file *file, struct page *page,
- loff_t *poffset)
-{
- struct inode *inode = file_inode(file);
- struct timespec64 atime, mtime;
- char *read_data;
- int rc;
-
- /* Is the page cached? */
- rc = cifs_readpage_from_fscache(inode, page);
- if (rc == 0)
- goto read_complete;
-
- read_data = kmap(page);
- /* for reads over a certain size could initiate async read ahead */
-
- rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
-
- if (rc < 0)
- goto io_error;
- else
- cifs_dbg(FYI, "Bytes read %d\n", rc);
-
- /* we do not want atime to be less than mtime, it broke some apps */
- atime = inode_set_atime_to_ts(inode, current_time(inode));
- mtime = inode_get_mtime(inode);
- if (timespec64_compare(&atime, &mtime) < 0)
- inode_set_atime_to_ts(inode, inode_get_mtime(inode));
-
- if (PAGE_SIZE > rc)
- memset(read_data + rc, 0, PAGE_SIZE - rc);
-
- flush_dcache_page(page);
- SetPageUptodate(page);
- rc = 0;
-
-io_error:
- kunmap(page);
-
-read_complete:
- unlock_page(page);
- return rc;
-}
-
-static int cifs_read_folio(struct file *file, struct folio *folio)
-{
- struct page *page = &folio->page;
- loff_t offset = page_file_offset(page);
- int rc = -EACCES;
- unsigned int xid;
-
- xid = get_xid();
-
- if (file->private_data == NULL) {
- rc = -EBADF;
- free_xid(xid);
- return rc;
- }
-
- cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
- page, (int)offset, (int)offset);
-
- rc = cifs_readpage_worker(file, page, &offset);
-
- free_xid(xid);
- return rc;
-}
-
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *open_file;
@@ -4817,123 +3040,6 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
return true;
}
-static int cifs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len,
- struct page **pagep, void **fsdata)
-{
- int oncethru = 0;
- pgoff_t index = pos >> PAGE_SHIFT;
- loff_t offset = pos & (PAGE_SIZE - 1);
- loff_t page_start = pos & PAGE_MASK;
- loff_t i_size;
- struct page *page;
- int rc = 0;
-
- cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
-
-start:
- page = grab_cache_page_write_begin(mapping, index);
- if (!page) {
- rc = -ENOMEM;
- goto out;
- }
-
- if (PageUptodate(page))
- goto out;
-
- /*
- * If we write a full page it will be up to date, no need to read from
- * the server. If the write is short, we'll end up doing a sync write
- * instead.
- */
- if (len == PAGE_SIZE)
- goto out;
-
- /*
- * optimize away the read when we have an oplock, and we're not
- * expecting to use any of the data we'd be reading in. That
- * is, when the page lies beyond the EOF, or straddles the EOF
- * and the write will cover all of the existing data.
- */
- if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
- i_size = i_size_read(mapping->host);
- if (page_start >= i_size ||
- (offset == 0 && (pos + len) >= i_size)) {
- zero_user_segments(page, 0, offset,
- offset + len,
- PAGE_SIZE);
- /*
- * PageChecked means that the parts of the page
- * to which we're not writing are considered up
- * to date. Once the data is copied to the
- * page, it can be set uptodate.
- */
- SetPageChecked(page);
- goto out;
- }
- }
-
- if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
- /*
- * might as well read a page, it is fast enough. If we get
- * an error, we don't need to return it. cifs_write_end will
- * do a sync write instead since PG_uptodate isn't set.
- */
- cifs_readpage_worker(file, page, &page_start);
- put_page(page);
- oncethru = 1;
- goto start;
- } else {
- /* we could try using another file handle if there is one -
- but how would we lock it to prevent close of that handle
- racing with this read? In any case
- this will be written out by write_end so is fine */
- }
-out:
- *pagep = page;
- return rc;
-}
-
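The oplock short-circuit in the removed write_begin reduces to one predicate: skip the read when the page starts at or past EOF, or when the write starts at offset 0 and reaches EOF, since no existing bytes would survive into the cached page. A standalone check of that predicate:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096LL

/* Mirror of the write_begin short-circuit: true when no on-server
 * bytes inside this page need to be read before the copy. */
static bool can_skip_read(long long pos, unsigned int len, long long i_size)
{
    long long page_start = pos & ~(PAGE_SIZE - 1);
    long long offset = pos & (PAGE_SIZE - 1);

    return page_start >= i_size ||
           (offset == 0 && pos + len >= i_size);
}

int main(void)
{
    printf("%d\n", can_skip_read(8192, 100, 4096)); /* beyond EOF: 1 */
    printf("%d\n", can_skip_read(0, 4096, 2000));   /* covers EOF: 1 */
    printf("%d\n", can_skip_read(100, 10, 4096));   /* mid-file:   0 */
    return 0;
}
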
-static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
-{
- if (folio_test_private(folio))
- return 0;
- if (folio_test_fscache(folio)) {
- if (current_is_kswapd() || !(gfp & __GFP_FS))
- return false;
- folio_wait_fscache(folio);
- }
- fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
- return true;
-}
-
-static void cifs_invalidate_folio(struct folio *folio, size_t offset,
- size_t length)
-{
- folio_wait_fscache(folio);
-}
-
-static int cifs_launder_folio(struct folio *folio)
-{
- int rc = 0;
- loff_t range_start = folio_pos(folio);
- loff_t range_end = range_start + folio_size(folio);
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = 0,
- .range_start = range_start,
- .range_end = range_end,
- };
-
- cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
-
- if (folio_clear_dirty_for_io(folio))
- rc = cifs_writepage_locked(&folio->page, &wbc);
-
- folio_wait_fscache(folio);
- return rc;
-}
-
void cifs_oplock_break(struct work_struct *work)
{
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
@@ -5023,25 +3129,6 @@ out:
cifs_done_oplock_break(cinode);
}
-/*
- * The presence of cifs_direct_io() in the address space ops vector
- * allows open() O_DIRECT flags which would have failed otherwise.
- *
- * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
- * so this method should never be called.
- *
- * Direct IO is not yet supported in the cached mode.
- */
-static ssize_t
-cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
-{
- /*
- * FIXME
- * Eventually need to support direct IO for non forcedirectio mounts
- */
- return -EINVAL;
-}
-
static int cifs_swap_activate(struct swap_info_struct *sis,
struct file *swap_file, sector_t *span)
{
@@ -5103,22 +3190,19 @@ static void cifs_swap_deactivate(struct file *file)
}
const struct address_space_operations cifs_addr_ops = {
- .read_folio = cifs_read_folio,
- .readahead = cifs_readahead,
- .writepages = cifs_writepages,
- .write_begin = cifs_write_begin,
- .write_end = cifs_write_end,
- .dirty_folio = netfs_dirty_folio,
- .release_folio = cifs_release_folio,
- .direct_IO = cifs_direct_io,
- .invalidate_folio = cifs_invalidate_folio,
- .launder_folio = cifs_launder_folio,
- .migrate_folio = filemap_migrate_folio,
+ .read_folio = netfs_read_folio,
+ .readahead = netfs_readahead,
+ .writepages = netfs_writepages,
+ .dirty_folio = netfs_dirty_folio,
+ .release_folio = netfs_release_folio,
+ .direct_IO = noop_direct_IO,
+ .invalidate_folio = netfs_invalidate_folio,
+ .migrate_folio = filemap_migrate_folio,
/*
 * TODO: investigate and, if useful, add an is_dirty_writeback helper
*/
- .swap_activate = cifs_swap_activate,
+ .swap_activate = cifs_swap_activate,
.swap_deactivate = cifs_swap_deactivate,
};
@@ -5128,13 +3212,10 @@ const struct address_space_operations cifs_addr_ops = {
* to leave cifs_readahead out of the address space operations.
*/
const struct address_space_operations cifs_addr_ops_smallbuf = {
- .read_folio = cifs_read_folio,
- .writepages = cifs_writepages,
- .write_begin = cifs_write_begin,
- .write_end = cifs_write_end,
- .dirty_folio = netfs_dirty_folio,
- .release_folio = cifs_release_folio,
- .invalidate_folio = cifs_invalidate_folio,
- .launder_folio = cifs_launder_folio,
- .migrate_folio = filemap_migrate_folio,
+ .read_folio = netfs_read_folio,
+ .writepages = netfs_writepages,
+ .dirty_folio = netfs_dirty_folio,
+ .release_folio = netfs_release_folio,
+ .invalidate_folio = netfs_invalidate_folio,
+ .migrate_folio = filemap_migrate_folio,
};
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index bdcbe6ff2739..3bbac925d076 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -37,7 +37,7 @@
#include "rfc1002pdu.h"
#include "fs_context.h"
-static DEFINE_MUTEX(cifs_mount_mutex);
+DEFINE_MUTEX(cifs_mount_mutex);
static const match_table_t cifs_smb_version_tokens = {
{ Smb_1, SMB1_VERSION_STRING },
@@ -162,6 +162,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_string("username", Opt_user),
fsparam_string("pass", Opt_pass),
fsparam_string("password", Opt_pass),
+ fsparam_string("password2", Opt_pass2),
fsparam_string("ip", Opt_ip),
fsparam_string("addr", Opt_ip),
fsparam_string("domain", Opt_domain),
@@ -345,6 +346,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
new_ctx->nodename = NULL;
new_ctx->username = NULL;
new_ctx->password = NULL;
+ new_ctx->password2 = NULL;
new_ctx->server_hostname = NULL;
new_ctx->domainname = NULL;
new_ctx->UNC = NULL;
@@ -357,6 +359,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
DUP_CTX_STR(prepath);
DUP_CTX_STR(username);
DUP_CTX_STR(password);
+ DUP_CTX_STR(password2);
DUP_CTX_STR(server_hostname);
DUP_CTX_STR(UNC);
DUP_CTX_STR(source);
@@ -745,6 +748,16 @@ static int smb3_fs_context_validate(struct fs_context *fc)
/* set the port that we got earlier */
cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port);
+ if (ctx->uid_specified && !ctx->forceuid_specified) {
+ ctx->override_uid = 1;
+ pr_notice("enabling forceuid mount option implicitly because uid= option is specified\n");
+ }
+
+ if (ctx->gid_specified && !ctx->forcegid_specified) {
+ ctx->override_gid = 1;
+ pr_notice("enabling forcegid mount option implicitly because gid= option is specified\n");
+ }
+
if (ctx->override_uid && !ctx->uid_specified) {
ctx->override_uid = 0;
pr_notice("ignoring forceuid mount option specified with no uid= option\n");
@@ -783,9 +796,9 @@ static int smb3_get_tree(struct fs_context *fc)
if (err)
return err;
- mutex_lock(&cifs_mount_mutex);
+ cifs_mount_lock();
ret = smb3_get_tree_common(fc);
- mutex_unlock(&cifs_mount_mutex);
+ cifs_mount_unlock();
return ret;
}
@@ -905,6 +918,8 @@ static int smb3_reconfigure(struct fs_context *fc)
else {
kfree_sensitive(ses->password);
ses->password = kstrdup(ctx->password, GFP_KERNEL);
+ kfree_sensitive(ses->password2);
+ ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
}
STEAL_STRING(cifs_sb, ctx, domainname);
STEAL_STRING(cifs_sb, ctx, nodename);
@@ -1014,12 +1029,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->override_uid = 0;
else
ctx->override_uid = 1;
+ ctx->forceuid_specified = true;
break;
case Opt_forcegid:
if (result.negated)
ctx->override_gid = 0;
else
ctx->override_gid = 1;
+ ctx->forcegid_specified = true;
break;
case Opt_perm:
if (result.negated)
@@ -1305,6 +1322,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
goto cifs_parse_mount_err;
}
break;
+ case Opt_pass2:
+ kfree_sensitive(ctx->password2);
+ ctx->password2 = NULL;
+ if (strlen(param->string) == 0)
+ break;
+
+ ctx->password2 = kstrdup(param->string, GFP_KERNEL);
+ if (ctx->password2 == NULL) {
+ cifs_errorf(fc, "OOM when copying password2 string\n");
+ goto cifs_parse_mount_err;
+ }
+ break;
case Opt_ip:
if (strlen(param->string) == 0) {
ctx->got_ip = false;
@@ -1608,6 +1637,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
cifs_parse_mount_err:
kfree_sensitive(ctx->password);
ctx->password = NULL;
+ kfree_sensitive(ctx->password2);
+ ctx->password2 = NULL;
return -EINVAL;
}
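
On any parse failure both password strings are released with kfree_sensitive(), which zeroes the buffer before freeing it so credentials never linger in recycled slab memory. A simplified sketch of the idea (the real helper derives the length itself via ksize(); free_secret here is purely illustrative):

/* Simplified illustration of what kfree_sensitive() provides. */
static void free_secret(char *secret, size_t len)
{
        if (!secret)
                return;
        memzero_explicit(secret, len);  /* wipe; cannot be optimised away */
        kfree(secret);
}
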
@@ -1713,6 +1744,8 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
ctx->username = NULL;
kfree_sensitive(ctx->password);
ctx->password = NULL;
+ kfree_sensitive(ctx->password2);
+ ctx->password2 = NULL;
kfree(ctx->server_hostname);
ctx->server_hostname = NULL;
kfree(ctx->UNC);
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index 7863f2248c4d..cf577ec0dd0a 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -145,6 +145,7 @@ enum cifs_param {
Opt_source,
Opt_user,
Opt_pass,
+ Opt_pass2,
Opt_ip,
Opt_domain,
Opt_srcaddr,
@@ -164,6 +165,8 @@ enum cifs_param {
};
struct smb3_fs_context {
+ bool forceuid_specified;
+ bool forcegid_specified;
bool uid_specified;
bool cruid_specified;
bool gid_specified;
@@ -177,6 +180,7 @@ struct smb3_fs_context {
char *username;
char *password;
+ char *password2;
char *domainname;
char *source;
char *server_hostname;
@@ -304,4 +308,16 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
#define MAX_CACHED_FIDS 16
extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+extern struct mutex cifs_mount_mutex;
+
+static inline void cifs_mount_lock(void)
+{
+ mutex_lock(&cifs_mount_mutex);
+}
+
+static inline void cifs_mount_unlock(void)
+{
+ mutex_unlock(&cifs_mount_mutex);
+}
+
#endif
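
Dropping the static qualifier and pairing the mutex with inline helpers lets other translation units serialize against mount without reaching into fs_context.c. The general shape of the pattern, with illustrative names:

/* demo_subsys.h -- declare the shared lock and wrap it */
extern struct mutex demo_subsys_mutex;

static inline void demo_subsys_lock(void)
{
        mutex_lock(&demo_subsys_mutex);
}

static inline void demo_subsys_unlock(void)
{
        mutex_unlock(&demo_subsys_mutex);
}

/* demo_subsys.c -- define the lock exactly once */
DEFINE_MUTEX(demo_subsys_mutex);
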
diff --git a/fs/smb/client/fscache.c b/fs/smb/client/fscache.c
index c4a3cb736881..01424a5cdb99 100644
--- a/fs/smb/client/fscache.c
+++ b/fs/smb/client/fscache.c
@@ -12,6 +12,16 @@
#include "cifs_fs_sb.h"
#include "cifsproto.h"
+/*
+ * Key for fscache inode. [!] Contents must match comparisons in cifs_find_inode().
+ */
+struct cifs_fscache_inode_key {
+ __le64 uniqueid; /* server inode number */
+ __le64 createtime; /* creation time on server */
+ u8 type; /* S_IFMT file type */
+} __packed;
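
Storing the key fields as __le64 in a __packed struct makes the byte string handed to fscache identical on every architecture; native endianness or compiler padding would otherwise invalidate cache contents across kernels or CPUs. A minimal sketch of building such a key (demo names; the cifs version is filled in below in cifs_fscache_get_inode_cookie()):

struct demo_cache_key {
        __le64 id;
        u8 type;
} __packed;                             /* no padding: stable byte layout */

static void demo_fill_key(struct demo_cache_key *key, u64 id, umode_t mode)
{
        key->id = cpu_to_le64(id);              /* fixed byte order */
        key->type = (mode & S_IFMT) >> 12;      /* file type fits in 4 bits */
}
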
+
static void cifs_fscache_fill_volume_coherency(
struct cifs_tcon *tcon,
struct cifs_fscache_volume_coherency_data *cd)
@@ -33,12 +43,23 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
char *key;
int ret = -ENOMEM;
+ if (tcon->fscache_acquired)
+ return 0;
+
+ mutex_lock(&tcon->fscache_lock);
+ if (tcon->fscache_acquired) {
+ mutex_unlock(&tcon->fscache_lock);
+ return 0;
+ }
+ tcon->fscache_acquired = true;
+
tcon->fscache = NULL;
switch (sa->sa_family) {
case AF_INET:
case AF_INET6:
break;
default:
+ mutex_unlock(&tcon->fscache_lock);
cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
return -EINVAL;
}
@@ -47,6 +68,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
sharename = extract_sharename(tcon->tree_name);
if (IS_ERR(sharename)) {
+ mutex_unlock(&tcon->fscache_lock);
cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
return PTR_ERR(sharename);
}
@@ -72,6 +94,11 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
}
pr_err("Cache volume key already in use (%s)\n", key);
vcookie = NULL;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_fscache_collision);
+ } else {
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_fscache_okay);
}
tcon->fscache = vcookie;
@@ -80,6 +107,7 @@ out_2:
kfree(key);
out:
kfree(sharename);
+ mutex_unlock(&tcon->fscache_lock);
return ret;
}
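
Several mounts can race to create the volume cookie for one tcon, so acquisition is guarded twice: an unlocked fast path for the common case, then a re-check under fscache_lock before committing. Note that every early exit added in this hunk must now also drop the mutex. Condensed, the pattern looks like this (sketch; a READ_ONCE() on the fast path would make the unlocked read explicit):

int acquire_once(struct demo_obj *obj)
{
        if (obj->acquired)              /* fast path, no lock taken */
                return 0;
        mutex_lock(&obj->lock);
        if (obj->acquired) {            /* lost the race; already done */
                mutex_unlock(&obj->lock);
                return 0;
        }
        obj->acquired = true;
        /* ... slow-path setup; every failure path must unlock ... */
        mutex_unlock(&obj->lock);
        return 0;
}
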
@@ -92,20 +120,26 @@ void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
cifs_fscache_fill_volume_coherency(tcon, &cd);
fscache_relinquish_volume(tcon->fscache, &cd, false);
tcon->fscache = NULL;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_fscache_relinq);
}
void cifs_fscache_get_inode_cookie(struct inode *inode)
{
struct cifs_fscache_inode_coherency_data cd;
+ struct cifs_fscache_inode_key key;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ key.uniqueid = cpu_to_le64(cifsi->uniqueid);
+ key.createtime = cpu_to_le64(cifsi->createtime);
+ key.type = (inode->i_mode & S_IFMT) >> 12;
cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);
cifsi->netfs.cache =
fscache_acquire_cookie(tcon->fscache, 0,
- &cifsi->uniqueid, sizeof(cifsi->uniqueid),
+ &key, sizeof(key),
&cd, sizeof(cd),
i_size_read(&cifsi->netfs.inode));
if (cifsi->netfs.cache)
@@ -136,112 +170,3 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
cifsi->netfs.cache = NULL;
}
}
-
-/*
- * Fallback page reading interface.
- */
-static int fscache_fallback_read_page(struct inode *inode, struct page *page)
-{
- struct netfs_cache_resources cres;
- struct fscache_cookie *cookie = cifs_inode_cookie(inode);
- struct iov_iter iter;
- struct bio_vec bvec;
- int ret;
-
- memset(&cres, 0, sizeof(cres));
- bvec_set_page(&bvec, page, PAGE_SIZE, 0);
- iov_iter_bvec(&iter, ITER_DEST, &bvec, 1, PAGE_SIZE);
-
- ret = fscache_begin_read_operation(&cres, cookie);
- if (ret < 0)
- return ret;
-
- ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
- NULL, NULL);
- fscache_end_operation(&cres);
- return ret;
-}
-
-/*
- * Fallback page writing interface.
- */
-static int fscache_fallback_write_pages(struct inode *inode, loff_t start, size_t len,
- bool no_space_allocated_yet)
-{
- struct netfs_cache_resources cres;
- struct fscache_cookie *cookie = cifs_inode_cookie(inode);
- struct iov_iter iter;
- int ret;
-
- memset(&cres, 0, sizeof(cres));
- iov_iter_xarray(&iter, ITER_SOURCE, &inode->i_mapping->i_pages, start, len);
-
- ret = fscache_begin_write_operation(&cres, cookie);
- if (ret < 0)
- return ret;
-
- ret = cres.ops->prepare_write(&cres, &start, &len, len, i_size_read(inode),
- no_space_allocated_yet);
- if (ret == 0)
- ret = fscache_write(&cres, start, &iter, NULL, NULL);
- fscache_end_operation(&cres);
- return ret;
-}
-
-/*
- * Retrieve a page from FS-Cache
- */
-int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
-{
- int ret;
-
- cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
- __func__, cifs_inode_cookie(inode), page, inode);
-
- ret = fscache_fallback_read_page(inode, page);
- if (ret < 0)
- return ret;
-
- /* Read completed synchronously */
- SetPageUptodate(page);
- return 0;
-}
-
-void __cifs_readahead_to_fscache(struct inode *inode, loff_t pos, size_t len)
-{
- cifs_dbg(FYI, "%s: (fsc: %p, p: %llx, l: %zx, i: %p)\n",
- __func__, cifs_inode_cookie(inode), pos, len, inode);
-
- fscache_fallback_write_pages(inode, pos, len, true);
-}
-
-/*
- * Query the cache occupancy.
- */
-int __cifs_fscache_query_occupancy(struct inode *inode,
- pgoff_t first, unsigned int nr_pages,
- pgoff_t *_data_first,
- unsigned int *_data_nr_pages)
-{
- struct netfs_cache_resources cres;
- struct fscache_cookie *cookie = cifs_inode_cookie(inode);
- loff_t start, data_start;
- size_t len, data_len;
- int ret;
-
- ret = fscache_begin_read_operation(&cres, cookie);
- if (ret < 0)
- return ret;
-
- start = first * PAGE_SIZE;
- len = nr_pages * PAGE_SIZE;
- ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
- &data_start, &data_len);
- if (ret == 0) {
- *_data_first = data_start / PAGE_SIZE;
- *_data_nr_pages = len / PAGE_SIZE;
- }
-
- fscache_end_operation(&cres);
- return ret;
-}
diff --git a/fs/smb/client/fscache.h b/fs/smb/client/fscache.h
index a3d73720914f..f06cb24f5f3c 100644
--- a/fs/smb/client/fscache.h
+++ b/fs/smb/client/fscache.h
@@ -74,39 +74,9 @@ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags
i_size_read(inode), flags);
}
-extern int __cifs_fscache_query_occupancy(struct inode *inode,
- pgoff_t first, unsigned int nr_pages,
- pgoff_t *_data_first,
- unsigned int *_data_nr_pages);
-
-static inline int cifs_fscache_query_occupancy(struct inode *inode,
- pgoff_t first, unsigned int nr_pages,
- pgoff_t *_data_first,
- unsigned int *_data_nr_pages)
+static inline bool cifs_fscache_enabled(struct inode *inode)
{
- if (!cifs_inode_cookie(inode))
- return -ENOBUFS;
- return __cifs_fscache_query_occupancy(inode, first, nr_pages,
- _data_first, _data_nr_pages);
-}
-
-extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
-extern void __cifs_readahead_to_fscache(struct inode *pinode, loff_t pos, size_t len);
-
-
-static inline int cifs_readpage_from_fscache(struct inode *inode,
- struct page *page)
-{
- if (cifs_inode_cookie(inode))
- return __cifs_readpage_from_fscache(inode, page);
- return -ENOBUFS;
-}
-
-static inline void cifs_readahead_to_fscache(struct inode *inode,
- loff_t pos, size_t len)
-{
- if (cifs_inode_cookie(inode))
- __cifs_readahead_to_fscache(inode, pos, len);
+ return fscache_cookie_enabled(cifs_inode_cookie(inode));
}
#else /* CONFIG_CIFS_FSCACHE */
@@ -124,25 +94,7 @@ static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) {}
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
-
-static inline int cifs_fscache_query_occupancy(struct inode *inode,
- pgoff_t first, unsigned int nr_pages,
- pgoff_t *_data_first,
- unsigned int *_data_nr_pages)
-{
- *_data_first = ULONG_MAX;
- *_data_nr_pages = 0;
- return -ENOBUFS;
-}
-
-static inline int
-cifs_readpage_from_fscache(struct inode *inode, struct page *page)
-{
- return -ENOBUFS;
-}
-
-static inline
-void cifs_readahead_to_fscache(struct inode *inode, loff_t pos, size_t len) {}
+static inline bool cifs_fscache_enabled(struct inode *inode) { return false; }
#endif /* CONFIG_CIFS_FSCACHE */
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index d28ab0af6049..262576573eb5 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -28,14 +28,26 @@
#include "cached_dir.h"
#include "reparse.h"
+/*
+ * Set parameters for the netfs library
+ */
+static void cifs_set_netfs_context(struct inode *inode)
+{
+ struct cifsInodeInfo *cifs_i = CIFS_I(inode);
+
+ netfs_inode_init(&cifs_i->netfs, &cifs_req_ops, true);
+}
+
static void cifs_set_ops(struct inode *inode)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct netfs_inode *ictx = netfs_inode(inode);
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
inode->i_op = &cifs_file_inode_ops;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+ set_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
inode->i_fop = &cifs_file_direct_nobrl_ops;
else
@@ -57,6 +69,7 @@ static void cifs_set_ops(struct inode *inode)
inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
else
inode->i_data.a_ops = &cifs_addr_ops;
+ mapping_set_large_folios(inode->i_mapping);
break;
case S_IFDIR:
if (IS_AUTOMOUNT(inode)) {
@@ -221,8 +234,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
if (fattr->cf_flags & CIFS_FATTR_JUNCTION)
inode->i_flags |= S_AUTOMOUNT;
- if (inode->i_state & I_NEW)
+ if (inode->i_state & I_NEW) {
+ cifs_set_netfs_context(inode);
cifs_set_ops(inode);
+ }
return 0;
}
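
The netfs context must be initialised exactly once per inode, hence the I_NEW gate above. The convention netfs relies on is that struct netfs_inode sits at the start of the filesystem's private inode (as it does in cifsInodeInfo), so the library can convert between the two. Illustrative sketch, where DEMO_I() and demo_req_ops stand in for the cifs equivalents CIFS_I() and cifs_req_ops:

struct demo_inode_info {
        struct netfs_inode netfs;       /* must be first: netfs casts to it */
        /* filesystem-private fields follow */
};

static void demo_set_netfs_context(struct inode *inode)
{
        if (inode->i_state & I_NEW)     /* first instantiation only */
                netfs_inode_init(&DEMO_I(inode)->netfs, &demo_req_ops, true);
}
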
@@ -1105,7 +1120,8 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
} else {
cifs_open_info_to_fattr(fattr, data, sb);
}
- if (!rc && fattr->cf_flags & CIFS_FATTR_DELETE_PENDING)
+ if (!rc && *inode &&
+ (fattr->cf_flags & CIFS_FATTR_DELETE_PENDING))
cifs_mark_open_handles_for_deleted_file(*inode, full_path);
break;
case -EREMOTE:
@@ -1351,6 +1367,8 @@ cifs_find_inode(struct inode *inode, void *opaque)
{
struct cifs_fattr *fattr = opaque;
+ /* [!] The values compared here must match the contents of struct cifs_fscache_inode_key. */
+
/* don't match inode with different uniqueid */
if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
return 0;
@@ -2428,24 +2446,6 @@ cifs_dentry_needs_reval(struct dentry *dentry)
return false;
}
-/*
- * Zap the cache. Called when invalid_mapping flag is set.
- */
-int
-cifs_invalidate_mapping(struct inode *inode)
-{
- int rc = 0;
-
- if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
- rc = invalidate_inode_pages2(inode->i_mapping);
- if (rc)
- cifs_dbg(VFS, "%s: invalidate inode %p failed with rc %d\n",
- __func__, inode, rc);
- }
-
- return rc;
-}
-
/**
* cifs_wait_bit_killable - helper for functions that are sleeping on bit locks
*
@@ -2465,7 +2465,8 @@ int
cifs_revalidate_mapping(struct inode *inode)
{
int rc;
- unsigned long *flags = &CIFS_I(inode)->flags;
+ struct cifsInodeInfo *cifs_inode = CIFS_I(inode);
+ unsigned long *flags = &cifs_inode->flags;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
/* swapfiles are not supposed to be shared */
@@ -2482,9 +2483,13 @@ cifs_revalidate_mapping(struct inode *inode)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
goto skip_invalidate;
- rc = cifs_invalidate_mapping(inode);
- if (rc)
+ cifs_inode->netfs.zero_point = cifs_inode->netfs.remote_i_size;
+ rc = filemap_invalidate_inode(inode, true, 0, LLONG_MAX);
+ if (rc) {
+ cifs_dbg(VFS, "%s: invalidate inode %p failed with rc %d\n",
+ __func__, inode, rc);
set_bit(CIFS_INO_INVALID_MAPPING, flags);
+ }
}
skip_invalidate:
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index c012dfdba80d..855ac5a62edf 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -247,7 +247,9 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) {
- if (ses_it->Suid == out.session_id) {
+ spin_lock(&ses_it->ses_lock);
+ if (ses_it->ses_status != SES_EXITING &&
+ ses_it->Suid == out.session_id) {
ses = ses_it;
/*
* since we are using the session outside the crit
@@ -255,9 +257,11 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug
* so increment its refcount
*/
cifs_smb_ses_inc_refcount(ses);
+ spin_unlock(&ses_it->ses_lock);
found = true;
goto search_end;
}
+ spin_unlock(&ses_it->ses_lock);
}
}
search_end:
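
Taking ses_lock around the match means a session already in SES_EXITING can no longer be chosen and ref-counted while teardown is in flight. The generic shape of the idiom (sketch with demo names):

/* Validate state and take a reference under the object's own lock. */
spin_lock(&obj->lock);
if (obj->state != STATE_EXITING) {
        get_ref(obj);                   /* object cannot exit while locked */
        spin_unlock(&obj->lock);
        return obj;
}
spin_unlock(&obj->lock);
return NULL;                            /* caller must handle absence */
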
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index c3771fc81328..07c468ddb88a 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -98,6 +98,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree(buf_to_free->serverDomain);
kfree(buf_to_free->serverNOS);
kfree_sensitive(buf_to_free->password);
+ kfree_sensitive(buf_to_free->password2);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
kfree_sensitive(buf_to_free->auth_key.response);
@@ -110,9 +111,10 @@ sesInfoFree(struct cifs_ses *buf_to_free)
}
struct cifs_tcon *
-tcon_info_alloc(bool dir_leases_enabled)
+tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
{
struct cifs_tcon *ret_buf;
+ static atomic_t tcon_debug_id;
ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
if (!ret_buf)
@@ -129,7 +131,8 @@ tcon_info_alloc(bool dir_leases_enabled)
atomic_inc(&tconInfoAllocCount);
ret_buf->status = TID_NEW;
- ++ret_buf->tc_count;
+ ret_buf->debug_id = atomic_inc_return(&tcon_debug_id);
+ ret_buf->tc_count = 1;
spin_lock_init(&ret_buf->tc_lock);
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
@@ -138,27 +141,26 @@ tcon_info_alloc(bool dir_leases_enabled)
atomic_set(&ret_buf->num_local_opens, 0);
atomic_set(&ret_buf->num_remote_opens, 0);
ret_buf->stats_from_time = ktime_get_real_seconds();
-#ifdef CONFIG_CIFS_DFS_UPCALL
- INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+#ifdef CONFIG_CIFS_FSCACHE
+ mutex_init(&ret_buf->fscache_lock);
#endif
+ trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
return ret_buf;
}
void
-tconInfoFree(struct cifs_tcon *tcon)
+tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
if (tcon == NULL) {
cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
return;
}
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace);
free_cached_dirs(tcon->cfids);
atomic_dec(&tconInfoAllocCount);
kfree(tcon->nativeFileSystem);
kfree_sensitive(tcon->password);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
-#endif
kfree(tcon->origin_fullpath);
kfree(tcon);
}
@@ -487,6 +489,8 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid != buf->Tid)
continue;
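
Each tcon now gets a monotonic debug_id at allocation and starts life with tc_count = 1, and every refcount transition fires the smb3_tcon_ref tracepoint, so a leaked reference can be tracked down by pairing GET/PUT lines in the trace rather than by inspection. The allocation side, reduced to the pattern (demo names; demo_obj is illustrative):

static atomic_t demo_debug_id;          /* shared, monotonically increasing */

struct demo_obj *demo_alloc(enum smb3_tcon_ref_trace trace)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        obj->debug_id = atomic_inc_return(&demo_debug_id);
        obj->ref = 1;                   /* caller owns the first reference */
        trace_smb3_tcon_ref(obj->debug_id, obj->ref, trace);
        return obj;
}
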
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index a9eaba8083b0..212ec6f66ec6 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -753,11 +753,11 @@ cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
}
-static void
+static int
cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid)
{
- CIFSSMBClose(xid, tcon, fid->netfid);
+ return CIFSSMBClose(xid, tcon, fid->netfid);
}
static int
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index 82b84a4941dd..677ef6f99a5b 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -622,6 +622,8 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
cifs_stats_inc(
@@ -697,6 +699,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock);
@@ -763,7 +767,7 @@ smb2_cancelled_close_fid(struct work_struct *work)
if (rc)
cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);
- cifs_put_tcon(tcon);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close_fid);
kfree(cancelled);
}
@@ -807,6 +811,8 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
if (tcon->tc_count <= 0) {
struct TCP_Server_Info *server = NULL;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_see_cancelled_close);
WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
spin_unlock(&cifs_tcp_ses_lock);
@@ -819,12 +825,14 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
return 0;
}
tcon->tc_count++;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_cancelled_close);
spin_unlock(&cifs_tcp_ses_lock);
rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
persistent_fid, volatile_fid);
if (rc)
- cifs_put_tcon(tcon);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close);
return rc;
}
@@ -852,7 +860,7 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
rsp->PersistentFileId,
rsp->VolatileFileId);
if (rc)
- cifs_put_tcon(tcon);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_mid);
return rc;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 2ed456948f34..ef18cd30f66c 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -217,8 +217,8 @@ smb2_get_credits(struct mid_q_entry *mid)
}
static int
-smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
- unsigned int *num, struct cifs_credits *credits)
+smb2_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
+ size_t *num, struct cifs_credits *credits)
{
int rc = 0;
unsigned int scredits, in_flight;
@@ -1412,14 +1412,14 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
-static void
+static int
smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid *fid)
{
- SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+ return SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
-static void
+static int
smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *cfile)
{
@@ -1430,7 +1430,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
rc = __SMB2_close(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, &file_inf);
if (rc)
- return;
+ return rc;
inode = d_inode(cfile->dentry);
@@ -1459,6 +1459,7 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
/* End of file and Attributes should not have to be updated on close */
spin_unlock(&inode->i_lock);
+ return rc;
}
static int
@@ -2480,6 +2481,8 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
spin_lock(&tcon->tc_lock);
@@ -2912,8 +2915,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
tcon = list_first_entry_or_null(&ses->tcon_list,
struct cifs_tcon,
tcon_list);
- if (tcon)
+ if (tcon) {
tcon->tc_count++;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_dfs_refer);
+ }
spin_unlock(&cifs_tcp_ses_lock);
}
@@ -2977,6 +2983,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
/* ipc tcons are not refcounted */
spin_lock(&cifs_tcp_ses_lock);
tcon->tc_count--;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_dec_dfs_refer);
/* tc_count can never go negative */
WARN_ON(tcon->tc_count < 0);
spin_unlock(&cifs_tcp_ses_lock);
@@ -3913,7 +3921,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
strcat(message, "W");
}
if (!new_oplock)
- strncpy(message, "None", sizeof(message));
+ strscpy(message, "None");
cinode->oplock = new_oplock;
cifs_dbg(FYI, "%s Lease granted on inode %p\n", message,
@@ -4482,7 +4490,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
unsigned int cur_off;
unsigned int cur_page_idx;
unsigned int pad_len;
- struct cifs_readdata *rdata = mid->callback_data;
+ struct cifs_io_subrequest *rdata = mid->callback_data;
struct smb2_hdr *shdr = (struct smb2_hdr *)buf;
int length;
bool use_rdma_mr = false;
@@ -4584,7 +4592,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
/* Copy the data to the output I/O iterator. */
rdata->result = cifs_copy_pages_to_iter(pages, pages_len,
- cur_off, &rdata->iter);
+ cur_off, &rdata->subreq.io_iter);
if (rdata->result != 0) {
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
@@ -4598,7 +4606,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
/* read response payload is in buf */
WARN_ONCE(pages && !xa_empty(pages),
"read data can be either in buf or in pages");
- length = copy_to_iter(buf + data_offset, data_len, &rdata->iter);
+ length = copy_to_iter(buf + data_offset, data_len, &rdata->subreq.io_iter);
if (length < 0)
return length;
rdata->got_bytes = data_len;
@@ -4961,68 +4969,84 @@ static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
return 0;
}
-int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
- struct dentry *dentry, struct cifs_tcon *tcon,
- const char *full_path, umode_t mode, dev_t dev)
+static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, umode_t mode, dev_t dev)
{
- struct cifs_open_info_data buf = {};
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
struct cifs_io_parms io_parms = {};
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_fid fid;
unsigned int bytes_written;
- struct win_dev *pdev;
+ struct win_dev pdev = {};
struct kvec iov[2];
__u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
int rc;
- if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
+ switch (mode & S_IFMT) {
+ case S_IFCHR:
+ strscpy(pdev.type, "IntxCHR");
+ pdev.major = cpu_to_le64(MAJOR(dev));
+ pdev.minor = cpu_to_le64(MINOR(dev));
+ break;
+ case S_IFBLK:
+ strscpy(pdev.type, "IntxBLK");
+ pdev.major = cpu_to_le64(MAJOR(dev));
+ pdev.minor = cpu_to_le64(MINOR(dev));
+ break;
+ case S_IFIFO:
+ strscpy(pdev.type, "LnxFIFO");
+ break;
+ default:
return -EPERM;
+ }
- oparms = (struct cifs_open_parms) {
- .tcon = tcon,
- .cifs_sb = cifs_sb,
- .desired_access = GENERIC_WRITE,
- .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
- CREATE_OPTION_SPECIAL),
- .disposition = FILE_CREATE,
- .path = full_path,
- .fid = &fid,
- };
+ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
+ FILE_CREATE, CREATE_NOT_DIR |
+ CREATE_OPTION_SPECIAL, ACL_NO_MODE);
+ oparms.fid = &fid;
- rc = server->ops->open(xid, &oparms, &oplock, &buf);
+ rc = server->ops->open(xid, &oparms, &oplock, NULL);
if (rc)
return rc;
- /*
- * BB Do not bother to decode buf since no local inode yet to put
- * timestamps in, but we can reuse it safely.
- */
- pdev = (struct win_dev *)&buf.fi;
io_parms.pid = current->tgid;
io_parms.tcon = tcon;
- io_parms.length = sizeof(*pdev);
- iov[1].iov_base = pdev;
- iov[1].iov_len = sizeof(*pdev);
- if (S_ISCHR(mode)) {
- memcpy(pdev->type, "IntxCHR", 8);
- pdev->major = cpu_to_le64(MAJOR(dev));
- pdev->minor = cpu_to_le64(MINOR(dev));
- } else if (S_ISBLK(mode)) {
- memcpy(pdev->type, "IntxBLK", 8);
- pdev->major = cpu_to_le64(MAJOR(dev));
- pdev->minor = cpu_to_le64(MINOR(dev));
- } else if (S_ISFIFO(mode)) {
- memcpy(pdev->type, "LnxFIFO", 8);
- }
+ io_parms.length = sizeof(pdev);
+ iov[1].iov_base = &pdev;
+ iov[1].iov_len = sizeof(pdev);
rc = server->ops->sync_write(xid, &fid, &io_parms,
&bytes_written, iov, 1);
server->ops->close(xid, tcon, &fid);
- d_drop(dentry);
- /* FIXME: add code here to set EAs */
- cifs_free_open_info(&buf);
+ return rc;
+}
+
+int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+ struct dentry *dentry, struct cifs_tcon *tcon,
+ const char *full_path, umode_t mode, dev_t dev)
+{
+ struct inode *new = NULL;
+ int rc;
+
+ rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
+ full_path, mode, dev);
+ if (rc)
+ return rc;
+
+ if (tcon->posix_extensions) {
+ rc = smb311_posix_get_inode_info(&new, full_path, NULL,
+ inode->i_sb, xid);
+ } else if (tcon->unix_ext) {
+ rc = cifs_get_inode_info_unix(&new, full_path,
+ inode->i_sb, xid);
+ } else {
+ rc = cifs_get_inode_info(&new, full_path, NULL,
+ inode->i_sb, xid, NULL);
+ }
+ if (!rc)
+ d_instantiate(dentry, new);
return rc;
}
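
SFU-style device nodes and FIFOs are ordinary files on the server whose payload starts with an 8-byte type tag plus little-endian major/minor numbers; the rewrite builds that block in a zeroed on-stack struct instead of reusing the open-info buffer, and on success fetches the new inode's metadata and instantiates the dentry rather than dropping it. As far as this hunk shows, the written layout corresponds to:

/* Payload written for an SFU special file (mirrors struct win_dev). */
struct demo_win_dev {
        unsigned char type[8];  /* "IntxCHR", "IntxBLK" or "LnxFIFO" */
        __le64 major;           /* device major; zero for FIFOs */
        __le64 minor;           /* device minor; zero for FIFOs */
} __packed;
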
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 3ea688558e6c..993ac36c3d58 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -23,6 +23,8 @@
#include <linux/uuid.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
+#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
@@ -367,6 +369,17 @@ again:
}
rc = cifs_setup_session(0, ses, server, nls_codepage);
+ if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
+ /*
+ * Try the alternate password for the next reconnect (the server may
+ * have key rotation enabled, for example) if one is available and the
+ * current password has expired, but do not swap on errors unrelated
+ * to the password, such as host down.
+ */
+ if (ses->password2)
+ swap(ses->password2, ses->password);
+ }
+
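
When session setup fails with an authentication-class error and an alternate password exists, the two are swapped so the next reconnect tries the rotated credential; unrelated failures such as host down leave the pair alone. In miniature (try_session_setup is an illustrative stand-in for cifs_setup_session):

/* Sketch: fall back to the alternate credential on auth errors only. */
rc = try_session_setup(ses);
if ((rc == -EACCES || rc == -EKEYEXPIRED || rc == -EKEYREVOKED) &&
    ses->password2)
        swap(ses->password, ses->password2);    /* next attempt uses it */
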
if ((rc == -EACCES) && !tcon->retry) {
mutex_unlock(&ses->session_mutex);
rc = -EHOSTDOWN;
@@ -3628,9 +3641,9 @@ replay_again:
memcpy(&pbuf->network_open_info,
&rsp->network_open_info,
sizeof(pbuf->network_open_info));
+ atomic_dec(&tcon->num_remote_opens);
}
- atomic_dec(&tcon->num_remote_opens);
close_exit:
SMB2_close_free(&rqst);
free_rsp_buf(resp_buftype, rsp);
@@ -4127,6 +4140,8 @@ void smb2_reconnect_server(struct work_struct *work)
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->need_reconnect || tcon->need_reopen_files) {
tcon->tc_count++;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_reconnect_server);
list_add_tail(&tcon->rlist, &tmp_list);
tcon_selected = true;
}
@@ -4165,14 +4180,14 @@ void smb2_reconnect_server(struct work_struct *work)
if (tcon->ipc)
cifs_put_smb_ses(tcon->ses);
else
- cifs_put_tcon(tcon);
+ cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server);
}
if (!ses_exist)
goto done;
/* allocate a dummy tcon struct used for reconnect */
- tcon = tcon_info_alloc(false);
+ tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server);
if (!tcon) {
resched = true;
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
@@ -4195,7 +4210,7 @@ void smb2_reconnect_server(struct work_struct *work)
list_del_init(&ses->rlist);
cifs_put_smb_ses(ses);
}
- tconInfoFree(tcon);
+ tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server);
done:
cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
@@ -4378,7 +4393,7 @@ static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
*/
static int
smb2_new_read_req(void **buf, unsigned int *total_len,
- struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
+ struct cifs_io_parms *io_parms, struct cifs_io_subrequest *rdata,
unsigned int remaining_bytes, int request_type)
{
int rc = -EACCES;
@@ -4406,10 +4421,12 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
- trace_smb3_read_enter(0 /* xid */,
- io_parms->persistent_fid,
- io_parms->tcon->tid, io_parms->tcon->ses->Suid,
- io_parms->offset, io_parms->length);
+ trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0,
+ rdata ? rdata->subreq.debug_index : 0,
+ rdata ? rdata->xid : 0,
+ io_parms->persistent_fid,
+ io_parms->tcon->tid, io_parms->tcon->ses->Suid,
+ io_parms->offset, io_parms->length);
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a RDMA write, fill in and append
@@ -4419,7 +4436,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
struct smbd_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
- rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->iter,
+ rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter,
true, need_invalidate);
if (!rdata->mr)
return -EAGAIN;
@@ -4470,8 +4487,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
- struct cifs_readdata *rdata = mid->callback_data;
- struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+ struct cifs_io_subrequest *rdata = mid->callback_data;
+ struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
struct TCP_Server_Info *server = rdata->server;
struct smb2_hdr *shdr =
(struct smb2_hdr *)rdata->iov[0].iov_base;
@@ -4479,17 +4496,17 @@ smb2_readv_callback(struct mid_q_entry *mid)
struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], .rq_nvec = 1 };
if (rdata->got_bytes) {
- rqst.rq_iter = rdata->iter;
- rqst.rq_iter_size = iov_iter_count(&rdata->iter);
+ rqst.rq_iter = rdata->subreq.io_iter;
+ rqst.rq_iter_size = iov_iter_count(&rdata->subreq.io_iter);
}
WARN_ONCE(rdata->server != mid->server,
"rdata server %p != mid server %p",
rdata->server, mid->server);
- cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
+ cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n",
__func__, mid->mid, mid->mid_state, rdata->result,
- rdata->bytes);
+ rdata->subreq.len);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
@@ -4499,7 +4516,6 @@ smb2_readv_callback(struct mid_q_entry *mid)
if (server->sign && !mid->decrypted) {
int rc;
- iov_iter_revert(&rqst.rq_iter, rdata->got_bytes);
iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes);
rc = smb2_verify_signature(&rqst, server);
if (rc)
@@ -4540,24 +4556,40 @@ smb2_readv_callback(struct mid_q_entry *mid)
#endif
if (rdata->result && rdata->result != -ENODATA) {
cifs_stats_fail_inc(tcon, SMB2_READ_HE);
- trace_smb3_read_err(0 /* xid */,
- rdata->cfile->fid.persistent_fid,
- tcon->tid, tcon->ses->Suid, rdata->offset,
- rdata->bytes, rdata->result);
+ trace_smb3_read_err(rdata->rreq->debug_id,
+ rdata->subreq.debug_index,
+ rdata->xid,
+ rdata->req->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, rdata->subreq.start,
+ rdata->subreq.len, rdata->result);
} else
- trace_smb3_read_done(0 /* xid */,
- rdata->cfile->fid.persistent_fid,
+ trace_smb3_read_done(rdata->rreq->debug_id,
+ rdata->subreq.debug_index,
+ rdata->xid,
+ rdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
- rdata->offset, rdata->got_bytes);
+ rdata->subreq.start, rdata->got_bytes);
- queue_work(cifsiod_wq, &rdata->work);
+ if (rdata->result == -ENODATA) {
+ /* We may have got an EOF error because fallocate
+ * failed to enlarge the file.
+ */
+ if (rdata->subreq.start < rdata->subreq.rreq->i_size)
+ rdata->result = 0;
+ }
+ if (rdata->result == 0 || rdata->result == -EAGAIN)
+ iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes);
+ rdata->credits.value = 0;
+ netfs_subreq_terminated(&rdata->subreq,
+ (rdata->result == 0 || rdata->result == -EAGAIN) ?
+ rdata->got_bytes : rdata->result, true);
release_mid(mid);
add_credits(server, &credits, 0);
}
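
The read completion no longer queues cifs-private work; it settles directly with netfs. Three details matter: an -ENODATA that lands inside i_size is demoted to success (the region may simply be a hole the server never enlarged), the iterator is advanced past the bytes actually received, and netfs_subreq_terminated() is handed either a byte count or a negative error. Condensed restatement of the logic above:

/* Completion contract with netfs (condensed from smb2_readv_callback). */
if (rdata->result == -ENODATA &&
    rdata->subreq.start < rdata->subreq.rreq->i_size)
        rdata->result = 0;              /* short read inside the file: OK */
if (rdata->result == 0 || rdata->result == -EAGAIN)
        iov_iter_advance(&rdata->subreq.io_iter, rdata->got_bytes);
netfs_subreq_terminated(&rdata->subreq,
                        (rdata->result == 0 || rdata->result == -EAGAIN) ?
                        rdata->got_bytes : rdata->result, true);
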
/* smb2_async_readv - send an async read, and set up mid to handle result */
int
-smb2_async_readv(struct cifs_readdata *rdata)
+smb2_async_readv(struct cifs_io_subrequest *rdata)
{
int rc, flags = 0;
char *buf;
@@ -4566,22 +4598,22 @@ smb2_async_readv(struct cifs_readdata *rdata)
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 1 };
struct TCP_Server_Info *server;
- struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
unsigned int total_len;
int credit_request;
- cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
- __func__, rdata->offset, rdata->bytes);
+ cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
+ __func__, rdata->subreq.start, rdata->subreq.len);
if (!rdata->server)
rdata->server = cifs_pick_channel(tcon->ses);
- io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
+ io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
io_parms.server = server = rdata->server;
- io_parms.offset = rdata->offset;
- io_parms.length = rdata->bytes;
- io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
- io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
+ io_parms.offset = rdata->subreq.start;
+ io_parms.length = rdata->subreq.len;
+ io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
+ io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
io_parms.pid = rdata->pid;
rc = smb2_new_read_req(
@@ -4598,7 +4630,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
shdr = (struct smb2_hdr *)buf;
if (rdata->credits.value > 0) {
- shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
+ shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->subreq.len,
SMB2_MAX_BUFFER_SIZE));
credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
if (server->credits >= server->max_credits)
@@ -4608,22 +4640,22 @@ smb2_async_readv(struct cifs_readdata *rdata)
min_t(int, server->max_credits -
server->credits, credit_request));
- rc = adjust_credits(server, &rdata->credits, rdata->bytes);
+ rc = adjust_credits(server, &rdata->credits, rdata->subreq.len);
if (rc)
goto async_readv_out;
flags |= CIFS_HAS_CREDITS;
}
- kref_get(&rdata->refcount);
rc = cifs_call_async(server, &rqst,
cifs_readv_receive, smb2_readv_callback,
smb3_handle_read_data, rdata, flags,
&rdata->credits);
if (rc) {
- kref_put(&rdata->refcount, cifs_readdata_release);
cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
- trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
+ trace_smb3_read_err(rdata->rreq->debug_id,
+ rdata->subreq.debug_index,
+ rdata->xid, io_parms.persistent_fid,
io_parms.tcon->tid,
io_parms.tcon->ses->Suid,
io_parms.offset, io_parms.length, rc);
@@ -4674,22 +4706,23 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
if (rc != -ENODATA) {
cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
cifs_dbg(VFS, "Send error in read = %d\n", rc);
- trace_smb3_read_err(xid,
+ trace_smb3_read_err(0, 0, xid,
req->PersistentFileId,
io_parms->tcon->tid, ses->Suid,
io_parms->offset, io_parms->length,
rc);
} else
- trace_smb3_read_done(xid, req->PersistentFileId, io_parms->tcon->tid,
+ trace_smb3_read_done(0, 0, xid,
+ req->PersistentFileId, io_parms->tcon->tid,
ses->Suid, io_parms->offset, 0);
free_rsp_buf(resp_buftype, rsp_iov.iov_base);
cifs_small_buf_release(req);
return rc == -ENODATA ? 0 : rc;
} else
- trace_smb3_read_done(xid,
- req->PersistentFileId,
- io_parms->tcon->tid, ses->Suid,
- io_parms->offset, io_parms->length);
+ trace_smb3_read_done(0, 0, xid,
+ req->PersistentFileId,
+ io_parms->tcon->tid, ses->Suid,
+ io_parms->offset, io_parms->length);
cifs_small_buf_release(req);
@@ -4722,12 +4755,13 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
- struct cifs_writedata *wdata = mid->callback_data;
- struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ struct cifs_io_subrequest *wdata = mid->callback_data;
+ struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
struct TCP_Server_Info *server = wdata->server;
- unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
struct cifs_credits credits = { .value = 0, .instance = 0 };
+ ssize_t result = 0;
+ size_t written;
WARN_ONCE(wdata->server != mid->server,
"wdata server %p != mid server %p",
@@ -4737,8 +4771,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
case MID_RESPONSE_RECEIVED:
credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
credits.instance = server->reconnect_instance;
- wdata->result = smb2_check_receive(mid, server, 0);
- if (wdata->result != 0)
+ result = smb2_check_receive(mid, server, 0);
+ if (result != 0)
break;
written = le32_to_cpu(rsp->DataLength);
@@ -4748,24 +4782,25 @@ smb2_writev_callback(struct mid_q_entry *mid)
* client. OS/2 servers are known to set incorrect
* CountHigh values.
*/
- if (written > wdata->bytes)
+ if (written > wdata->subreq.len)
written &= 0xFFFF;
- if (written < wdata->bytes)
+ if (written < wdata->subreq.len)
wdata->result = -ENOSPC;
else
- wdata->bytes = written;
+ wdata->subreq.len = written;
+ iov_iter_advance(&wdata->subreq.io_iter, written);
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
- wdata->result = -EAGAIN;
+ result = -EAGAIN;
break;
case MID_RESPONSE_MALFORMED:
credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
credits.instance = server->reconnect_instance;
fallthrough;
default:
- wdata->result = -EIO;
+ result = -EIO;
break;
}
#ifdef CONFIG_CIFS_SMB_DIRECT
@@ -4781,44 +4816,44 @@ smb2_writev_callback(struct mid_q_entry *mid)
wdata->mr = NULL;
}
#endif
- if (wdata->result) {
+ if (result) {
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
- trace_smb3_write_err(0 /* no xid */,
- wdata->cfile->fid.persistent_fid,
- tcon->tid, tcon->ses->Suid, wdata->offset,
- wdata->bytes, wdata->result);
+ trace_smb3_write_err(wdata->xid,
+ wdata->req->cfile->fid.persistent_fid,
+ tcon->tid, tcon->ses->Suid, wdata->subreq.start,
+ wdata->subreq.len, wdata->result);
if (wdata->result == -ENOSPC)
pr_warn_once("Out of space writing to %s\n",
tcon->tree_name);
} else
trace_smb3_write_done(0 /* no xid */,
- wdata->cfile->fid.persistent_fid,
+ wdata->req->cfile->fid.persistent_fid,
tcon->tid, tcon->ses->Suid,
- wdata->offset, wdata->bytes);
+ wdata->subreq.start, wdata->subreq.len);
- queue_work(cifsiod_wq, &wdata->work);
+ wdata->credits.value = 0;
+ cifs_write_subrequest_terminated(wdata, result ?: written, true);
release_mid(mid);
add_credits(server, &credits, 0);
}
/* smb2_async_writev - send an async write, and set up mid to handle result */
-int
-smb2_async_writev(struct cifs_writedata *wdata,
- void (*release)(struct kref *kref))
+void
+smb2_async_writev(struct cifs_io_subrequest *wdata)
{
int rc = -EACCES, flags = 0;
struct smb2_write_req *req = NULL;
struct smb2_hdr *shdr;
- struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+ struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
struct TCP_Server_Info *server = wdata->server;
struct kvec iov[1];
struct smb_rqst rqst = { };
- unsigned int total_len;
+ unsigned int total_len, xid = wdata->xid;
struct cifs_io_parms _io_parms;
struct cifs_io_parms *io_parms = NULL;
int credit_request;
- if (!wdata->server || wdata->replay)
+ if (!wdata->server || test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
server = wdata->server = cifs_pick_channel(tcon->ses);
/*
@@ -4828,10 +4863,10 @@ smb2_async_writev(struct cifs_writedata *wdata,
_io_parms = (struct cifs_io_parms) {
.tcon = tcon,
.server = server,
- .offset = wdata->offset,
- .length = wdata->bytes,
- .persistent_fid = wdata->cfile->fid.persistent_fid,
- .volatile_fid = wdata->cfile->fid.volatile_fid,
+ .offset = wdata->subreq.start,
+ .length = wdata->subreq.len,
+ .persistent_fid = wdata->req->cfile->fid.persistent_fid,
+ .volatile_fid = wdata->req->cfile->fid.volatile_fid,
.pid = wdata->pid,
};
io_parms = &_io_parms;
@@ -4839,7 +4874,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
(void **) &req, &total_len);
if (rc)
- return rc;
+ goto out;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -4857,7 +4892,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
offsetof(struct smb2_write_req, Buffer));
req->RemainingBytes = 0;
- trace_smb3_write_enter(0 /* xid */,
+ trace_smb3_write_enter(wdata->xid,
io_parms->persistent_fid,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
@@ -4871,10 +4906,10 @@ smb2_async_writev(struct cifs_writedata *wdata,
*/
if (smb3_use_rdma_offload(io_parms)) {
struct smbd_buffer_descriptor_v1 *v1;
- size_t data_size = iov_iter_count(&wdata->iter);
+ size_t data_size = iov_iter_count(&wdata->subreq.io_iter);
bool need_invalidate = server->dialect == SMB30_PROT_ID;
- wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->iter,
+ wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
false, need_invalidate);
if (!wdata->mr) {
rc = -EAGAIN;
@@ -4901,9 +4936,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rqst.rq_iter = wdata->iter;
+ rqst.rq_iter = wdata->subreq.io_iter;
rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter);
- if (wdata->replay)
+ if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
smb2_set_replay(server, &rqst);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (wdata->mr)
@@ -4921,7 +4956,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
#endif
if (wdata->credits.value > 0) {
- shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
+ shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,
SMB2_MAX_BUFFER_SIZE));
credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
if (server->credits >= server->max_credits)
@@ -4938,25 +4973,27 @@ smb2_async_writev(struct cifs_writedata *wdata,
flags |= CIFS_HAS_CREDITS;
}
- kref_get(&wdata->refcount);
rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
wdata, flags, &wdata->credits);
-
+ /* Can't touch wdata if rc == 0 */
if (rc) {
- trace_smb3_write_err(0 /* no xid */,
+ trace_smb3_write_err(xid,
io_parms->persistent_fid,
io_parms->tcon->tid,
io_parms->tcon->ses->Suid,
io_parms->offset,
io_parms->length,
rc);
- kref_put(&wdata->refcount, release);
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
}
async_writev_out:
cifs_small_buf_release(req);
- return rc;
+out:
+ if (rc) {
+ add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
+ cifs_write_subrequest_terminated(wdata, rc, true);
+ }
}
/*
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index c72a3b2886b7..5c458ab3b05a 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -145,7 +145,7 @@ struct durable_context_v2 {
} __packed;
struct create_durable_v2 {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
struct durable_context_v2 dcontext;
} __packed;
@@ -167,7 +167,7 @@ struct durable_reconnect_context_v2_rsp {
} __packed;
struct create_durable_handle_reconnect_v2 {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
struct durable_reconnect_context_v2 dcontext;
__u8 Pad[4];
@@ -175,7 +175,7 @@ struct create_durable_handle_reconnect_v2 {
/* See MS-SMB2 2.2.13.2.5 */
struct crt_twarp_ctxt {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
__le64 Timestamp;
@@ -183,12 +183,12 @@ struct crt_twarp_ctxt {
/* See MS-SMB2 2.2.13.2.9 */
struct crt_query_id_ctxt {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
} __packed;
struct crt_sd_ctxt {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
struct smb3_sd sd;
} __packed;
@@ -320,7 +320,7 @@ struct smb2_file_reparse_point_info {
} __packed;
struct smb2_file_network_open_info {
- struct_group(network_open_info,
+ struct_group_attr(network_open_info, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
@@ -415,7 +415,7 @@ struct smb2_posix_info_parsed {
};
struct smb2_create_ea_ctx {
- struct create_context ctx;
+ struct create_context_hdr ctx;
__u8 name[8];
struct smb2_file_full_ea_info ea;
} __packed;
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 732169d8a67a..b208232b12a2 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -210,11 +210,10 @@ extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
__le64 *uniqueid);
-extern int smb2_async_readv(struct cifs_readdata *rdata);
+extern int smb2_async_readv(struct cifs_io_subrequest *rdata);
extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf, int *buf_type);
-extern int smb2_async_writev(struct cifs_writedata *wdata,
- void (*release)(struct kref *kref));
+extern void smb2_async_writev(struct cifs_io_subrequest *wdata);
extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec);
extern int SMB2_echo(struct TCP_Server_Info *server);
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 5a3ca62d2f07..02135a605305 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -189,6 +189,8 @@ smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
if (tcon->tid != tid)
continue;
++tcon->tc_count;
+ trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+ netfs_trace_tcon_ref_get_find_sess_tcon);
return tcon;
}
@@ -659,7 +661,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
}
spin_unlock(&server->srv_lock);
if (!is_binding && !server->session_estab) {
- strncpy(shdr->Signature, "BSRSPYL", 8);
+ strscpy(shdr->Signature, "BSRSPYL");
return 0;
}
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index f9c1fd32d0b8..af97389e983e 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -3,6 +3,9 @@
* Copyright (C) 2018, Microsoft Corporation.
*
* Author(s): Steve French <stfrench@microsoft.com>
+ *
+ * Please use this 3-part article as a reference for writing new tracepoints:
+ * https://lwn.net/Articles/379903/
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cifs
@@ -15,12 +18,129 @@
#include <linux/inet.h>
/*
- * Please use this 3-part article as a reference for writing new tracepoints:
- * https://lwn.net/Articles/379903/
+ * Specify enums for tracing information.
*/
+#define smb3_tcon_ref_traces \
+ EM(netfs_trace_tcon_ref_dec_dfs_refer, "DEC DfsRef") \
+ EM(netfs_trace_tcon_ref_free, "FRE ") \
+ EM(netfs_trace_tcon_ref_free_fail, "FRE Fail ") \
+ EM(netfs_trace_tcon_ref_free_ipc, "FRE Ipc ") \
+ EM(netfs_trace_tcon_ref_free_ipc_fail, "FRE Ipc-F ") \
+ EM(netfs_trace_tcon_ref_free_reconnect_server, "FRE Reconn") \
+ EM(netfs_trace_tcon_ref_get_cancelled_close, "GET Cn-Cls") \
+ EM(netfs_trace_tcon_ref_get_dfs_refer, "GET DfsRef") \
+ EM(netfs_trace_tcon_ref_get_find, "GET Find ") \
+ EM(netfs_trace_tcon_ref_get_find_sess_tcon, "GET FndSes") \
+ EM(netfs_trace_tcon_ref_get_reconnect_server, "GET Reconn") \
+ EM(netfs_trace_tcon_ref_new, "NEW ") \
+ EM(netfs_trace_tcon_ref_new_ipc, "NEW Ipc ") \
+ EM(netfs_trace_tcon_ref_new_reconnect_server, "NEW Reconn") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close, "PUT Cn-Cls") \
+ EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
+ EM(netfs_trace_tcon_ref_put_cancelled_mid, "PUT Cn-Mid") \
+ EM(netfs_trace_tcon_ref_put_mnt_ctx, "PUT MntCtx") \
+ EM(netfs_trace_tcon_ref_put_reconnect_server, "PUT Reconn") \
+ EM(netfs_trace_tcon_ref_put_tlink, "PUT Tlink ") \
+ EM(netfs_trace_tcon_ref_see_cancelled_close, "SEE Cn-Cls") \
+ EM(netfs_trace_tcon_ref_see_fscache_collision, "SEE FV-CO!") \
+ EM(netfs_trace_tcon_ref_see_fscache_okay, "SEE FV-Ok ") \
+ EM(netfs_trace_tcon_ref_see_fscache_relinq, "SEE FV-Rlq") \
+ E_(netfs_trace_tcon_ref_see_umount, "SEE Umount")
+
+#undef EM
+#undef E_
+
+/*
+ * Define those tracing enums.
+ */
+#ifndef __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum smb3_tcon_ref_trace { smb3_tcon_ref_traces } __mode(byte);
+
+#undef EM
+#undef E_
+#endif
+
+/*
+ * Export enum symbols via userspace.
+ */
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+smb3_tcon_ref_traces;
+
+#undef EM
+#undef E_
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
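
This is the kernel's standard trace-enum idiom: the EM()/E_() list is written once and expanded three times, as enum members, as TRACE_DEFINE_ENUM() exports so userspace tooling can decode the values, and as the { value, "string" } pairs that __print_symbolic() consumes. Stripped to its skeleton (demo names; the TRACE_DEFINE_ENUM expansion is elided for brevity):

/* One list, multiple expansions (skeleton of the idiom above). */
#define demo_traces \
        EM(demo_trace_get, "GET") \
        E_(demo_trace_put, "PUT")

#define EM(a, b) a,
#define E_(a, b) a
enum demo_trace { demo_traces };        /* expansion 1: the enum itself */
#undef EM
#undef E_

#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
/* expansion 2: __print_symbolic(val, demo_traces) renders "GET"/"PUT" */
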
/* For logging errors in read or write */
DECLARE_EVENT_CLASS(smb3_rw_err_class,
+ TP_PROTO(unsigned int rreq_debug_id,
+ unsigned int rreq_debug_index,
+ unsigned int xid,
+ __u64 fid,
+ __u32 tid,
+ __u64 sesid,
+ __u64 offset,
+ __u32 len,
+ int rc),
+ TP_ARGS(rreq_debug_id, rreq_debug_index,
+ xid, fid, tid, sesid, offset, len, rc),
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq_debug_id)
+ __field(unsigned int, rreq_debug_index)
+ __field(unsigned int, xid)
+ __field(__u64, fid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u64, offset)
+ __field(__u32, len)
+ __field(int, rc)
+ ),
+ TP_fast_assign(
+ __entry->rreq_debug_id = rreq_debug_id;
+ __entry->rreq_debug_index = rreq_debug_index;
+ __entry->xid = xid;
+ __entry->fid = fid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->rc = rc;
+ ),
+ TP_printk("\tR=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
+ __entry->rreq_debug_id, __entry->rreq_debug_index,
+ __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+ __entry->offset, __entry->len, __entry->rc)
+)
+
+#define DEFINE_SMB3_RW_ERR_EVENT(name) \
+DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \
+ TP_PROTO(unsigned int rreq_debug_id, \
+ unsigned int rreq_debug_index, \
+ unsigned int xid, \
+ __u64 fid, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u64 offset, \
+ __u32 len, \
+ int rc), \
+ TP_ARGS(rreq_debug_id, rreq_debug_index, xid, fid, tid, sesid, offset, len, rc))
+
+DEFINE_SMB3_RW_ERR_EVENT(read_err);
+
+/* For logging errors in other file I/O ops */
+DECLARE_EVENT_CLASS(smb3_other_err_class,
TP_PROTO(unsigned int xid,
__u64 fid,
__u32 tid,
@@ -52,8 +172,8 @@ DECLARE_EVENT_CLASS(smb3_rw_err_class,
__entry->offset, __entry->len, __entry->rc)
)
-#define DEFINE_SMB3_RW_ERR_EVENT(name) \
-DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \
+#define DEFINE_SMB3_OTHER_ERR_EVENT(name) \
+DEFINE_EVENT(smb3_other_err_class, smb3_##name, \
TP_PROTO(unsigned int xid, \
__u64 fid, \
__u32 tid, \
@@ -63,15 +183,67 @@ DEFINE_EVENT(smb3_rw_err_class, smb3_##name, \
int rc), \
TP_ARGS(xid, fid, tid, sesid, offset, len, rc))
-DEFINE_SMB3_RW_ERR_EVENT(write_err);
-DEFINE_SMB3_RW_ERR_EVENT(read_err);
-DEFINE_SMB3_RW_ERR_EVENT(query_dir_err);
-DEFINE_SMB3_RW_ERR_EVENT(zero_err);
-DEFINE_SMB3_RW_ERR_EVENT(falloc_err);
+DEFINE_SMB3_OTHER_ERR_EVENT(write_err);
+DEFINE_SMB3_OTHER_ERR_EVENT(query_dir_err);
+DEFINE_SMB3_OTHER_ERR_EVENT(zero_err);
+DEFINE_SMB3_OTHER_ERR_EVENT(falloc_err);
/* For logging successful read or write */
DECLARE_EVENT_CLASS(smb3_rw_done_class,
+ TP_PROTO(unsigned int rreq_debug_id,
+ unsigned int rreq_debug_index,
+ unsigned int xid,
+ __u64 fid,
+ __u32 tid,
+ __u64 sesid,
+ __u64 offset,
+ __u32 len),
+ TP_ARGS(rreq_debug_id, rreq_debug_index,
+ xid, fid, tid, sesid, offset, len),
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq_debug_id)
+ __field(unsigned int, rreq_debug_index)
+ __field(unsigned int, xid)
+ __field(__u64, fid)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u64, offset)
+ __field(__u32, len)
+ ),
+ TP_fast_assign(
+ __entry->rreq_debug_id = rreq_debug_id;
+ __entry->rreq_debug_index = rreq_debug_index;
+ __entry->xid = xid;
+ __entry->fid = fid;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->offset = offset;
+ __entry->len = len;
+ ),
+ TP_printk("R=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x",
+ __entry->rreq_debug_id, __entry->rreq_debug_index,
+ __entry->xid, __entry->sesid, __entry->tid, __entry->fid,
+ __entry->offset, __entry->len)
+)
+
+#define DEFINE_SMB3_RW_DONE_EVENT(name) \
+DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \
+ TP_PROTO(unsigned int rreq_debug_id, \
+ unsigned int rreq_debug_index, \
+ unsigned int xid, \
+ __u64 fid, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u64 offset, \
+ __u32 len), \
+ TP_ARGS(rreq_debug_id, rreq_debug_index, xid, fid, tid, sesid, offset, len))
+
+DEFINE_SMB3_RW_DONE_EVENT(read_enter);
+DEFINE_SMB3_RW_DONE_EVENT(read_done);
+
+/* For logging successful other op */
+DECLARE_EVENT_CLASS(smb3_other_done_class,
TP_PROTO(unsigned int xid,
__u64 fid,
__u32 tid,
@@ -100,8 +272,8 @@ DECLARE_EVENT_CLASS(smb3_rw_done_class,
__entry->offset, __entry->len)
)
-#define DEFINE_SMB3_RW_DONE_EVENT(name) \
-DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \
+#define DEFINE_SMB3_OTHER_DONE_EVENT(name) \
+DEFINE_EVENT(smb3_other_done_class, smb3_##name, \
TP_PROTO(unsigned int xid, \
__u64 fid, \
__u32 tid, \
@@ -110,16 +282,14 @@ DEFINE_EVENT(smb3_rw_done_class, smb3_##name, \
__u32 len), \
TP_ARGS(xid, fid, tid, sesid, offset, len))
-DEFINE_SMB3_RW_DONE_EVENT(write_enter);
-DEFINE_SMB3_RW_DONE_EVENT(read_enter);
-DEFINE_SMB3_RW_DONE_EVENT(query_dir_enter);
-DEFINE_SMB3_RW_DONE_EVENT(zero_enter);
-DEFINE_SMB3_RW_DONE_EVENT(falloc_enter);
-DEFINE_SMB3_RW_DONE_EVENT(write_done);
-DEFINE_SMB3_RW_DONE_EVENT(read_done);
-DEFINE_SMB3_RW_DONE_EVENT(query_dir_done);
-DEFINE_SMB3_RW_DONE_EVENT(zero_done);
-DEFINE_SMB3_RW_DONE_EVENT(falloc_done);
+DEFINE_SMB3_OTHER_DONE_EVENT(write_enter);
+DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_enter);
+DEFINE_SMB3_OTHER_DONE_EVENT(zero_enter);
+DEFINE_SMB3_OTHER_DONE_EVENT(falloc_enter);
+DEFINE_SMB3_OTHER_DONE_EVENT(write_done);
+DEFINE_SMB3_OTHER_DONE_EVENT(query_dir_done);
+DEFINE_SMB3_OTHER_DONE_EVENT(zero_done);
+DEFINE_SMB3_OTHER_DONE_EVENT(falloc_done);
/* For logging successful set EOF (truncate) */
DECLARE_EVENT_CLASS(smb3_eof_class,
@@ -375,6 +545,7 @@ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(get_reparse_compound_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);
DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);
+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mknod_enter);
DECLARE_EVENT_CLASS(smb3_inf_compound_done_class,
TP_PROTO(unsigned int xid,
@@ -415,7 +586,7 @@ DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(query_wsl_ea_compound_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);
DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(tdis_done);
-
+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mknod_done);
DECLARE_EVENT_CLASS(smb3_inf_compound_err_class,
TP_PROTO(unsigned int xid,
@@ -461,6 +632,7 @@ DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(query_wsl_ea_compound_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);
DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(tdis_err);
+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mknod_err);
/*
* For logging SMB3 Status code and Command for responses which return errors
@@ -1123,6 +1295,30 @@ DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
DEFINE_SMB3_CREDIT_EVENT(set_credits);
+
+TRACE_EVENT(smb3_tcon_ref,
+ TP_PROTO(unsigned int tcon_debug_id, int ref,
+ enum smb3_tcon_ref_trace trace),
+ TP_ARGS(tcon_debug_id, ref, trace),
+ TP_STRUCT__entry(
+ __field(unsigned int, tcon)
+ __field(int, ref)
+ __field(enum smb3_tcon_ref_trace, trace)
+ ),
+ TP_fast_assign(
+ __entry->tcon = tcon_debug_id;
+ __entry->ref = ref;
+ __entry->trace = trace;
+ ),
+ TP_printk("TC=%08x %s r=%u",
+ __entry->tcon,
+ __print_symbolic(__entry->trace, smb3_tcon_ref_traces),
+ __entry->ref)
+ );
+
+
+#undef EM
+#undef E_
#endif /* _CIFS_TRACE_H */
#undef TRACE_INCLUDE_PATH
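The `#undef EM` / `#undef E_` pair above closes out the usual trace-event idiom for generating an enum and its `__print_symbolic()` string table from a single list. A minimal sketch of that idiom (the names below are illustrative, not the exact cifs list):

#define smb3_tcon_ref_traces					\
	EM(netfs_trace_tcon_ref_new,		"GET new     ")	\
	E_(netfs_trace_tcon_ref_put_mnt_ctx,	"PUT mnt_ctx ")

/* First expansion: emit the enum constants. */
#undef EM
#undef E_
#define EM(a, b) a,
#define E_(a, b) a
enum smb3_tcon_ref_trace { smb3_tcon_ref_traces } __mode(byte);

/* Second expansion: emit { value, "string" } pairs for __print_symbolic(). */
#undef EM
#undef E_
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }

The list name is then passed straight to `__print_symbolic(__entry->trace, smb3_tcon_ref_traces)`, as in the `smb3_tcon_ref` event above.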
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 994d70193432..012b9bd06995 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -691,8 +691,8 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
}
int
-cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
- unsigned int *num, struct cifs_credits *credits)
+cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
+ size_t *num, struct cifs_credits *credits)
{
*num = size;
credits->value = 0;
@@ -909,12 +909,15 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
+ spin_unlock(&server->mid_lock);
cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
__func__, mid->mid, mid->mid_state);
rc = -EIO;
+ goto sync_mid_done;
}
spin_unlock(&server->mid_lock);
+sync_mid_done:
release_mid(mid);
return rc;
}
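The reshuffle above releases `mid_lock` before the debug print and then jumps over the common unlock, so the invalid-state path neither logs while holding the spinlock nor unlocks twice. The general shape, as a sketch:

	spin_lock(&server->mid_lock);
	if (invalid_state) {			/* illustrative condition */
		spin_unlock(&server->mid_lock);
		pr_err("bad mid state\n");	/* log with the lock dropped */
		rc = -EIO;
		goto done;			/* skip the common unlock */
	}
	spin_unlock(&server->mid_lock);
done:
	release_mid(mid);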
@@ -1057,9 +1060,11 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
index = (uint)atomic_inc_return(&ses->chan_seq);
index %= ses->chan_count;
}
+
+ server = ses->chans[index].server;
spin_unlock(&ses->chan_lock);
- return ses->chans[index].server;
+ return server;
}
int
@@ -1687,7 +1692,7 @@ __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
- struct cifs_readdata *rdata = mid->callback_data;
+ struct cifs_io_subrequest *rdata = mid->callback_data;
return __cifs_readv_discard(server, mid, rdata->result);
}
@@ -1697,13 +1702,13 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length, len;
unsigned int data_offset, data_len;
- struct cifs_readdata *rdata = mid->callback_data;
+ struct cifs_io_subrequest *rdata = mid->callback_data;
char *buf = server->smallbuf;
unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
bool use_rdma_mr = false;
- cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n",
- __func__, mid->mid, rdata->offset, rdata->bytes);
+ cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%zu\n",
+ __func__, mid->mid, rdata->subreq.start, rdata->subreq.len);
/*
* read the rest of READ_RSP header (sans Data array), or whatever we
@@ -1808,8 +1813,11 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
length = data_len; /* An RDMA read is already done. */
else
#endif
- length = cifs_read_iter_from_socket(server, &rdata->iter,
+ {
+ length = cifs_read_iter_from_socket(server, &rdata->subreq.io_iter,
data_len);
+ iov_iter_revert(&rdata->subreq.io_iter, data_len);
+ }
if (length > 0)
rdata->got_bytes += length;
server->total_read += length;
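`cifs_read_iter_from_socket()` advances the iterator as it fills it; the added `iov_iter_revert()` rewinds it by the same amount, presumably so the subrequest's `io_iter` still spans the whole buffer for whatever consumes or retries it later. The general behaviour:

	/* copying through an iov_iter consumes it ... */
	size_t before = iov_iter_count(&iter);
	size_t n = copy_to_iter(src, len, &iter);	/* iter advanced by n */

	/* ... and iov_iter_revert() winds it back */
	iov_iter_revert(&iter, n);
	WARN_ON(iov_iter_count(&iter) != before);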
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index 1b594307c9d5..8d10be1fe18a 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -711,7 +711,7 @@ struct smb2_close_rsp {
__le16 StructureSize; /* 60 */
__le16 Flags;
__le32 Reserved;
- struct_group(network_open_info,
+ struct_group_attr(network_open_info, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
@@ -1171,12 +1171,15 @@ struct smb2_server_client_notification {
#define SMB2_CREATE_FLAG_REPARSEPOINT 0x01
struct create_context {
- __le32 Next;
- __le16 NameOffset;
- __le16 NameLength;
- __le16 Reserved;
- __le16 DataOffset;
- __le32 DataLength;
+ /* New members must be added within the struct_group() macro below. */
+ __struct_group(create_context_hdr, hdr, __packed,
+ __le32 Next;
+ __le16 NameOffset;
+ __le16 NameLength;
+ __le16 Reserved;
+ __le16 DataOffset;
+ __le32 DataLength;
+ );
__u8 Buffer[];
} __packed;
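`__struct_group()` emits the members twice inside an anonymous union: once as anonymous fields, so existing `ctx->Next` accesses keep compiling, and once as a tagged, named struct. That gives the hunks that follow a standalone `struct create_context_hdr` to embed in the `create_*` structures, instead of `struct create_context`, whose flexible `Buffer[]` member must sit at the end of any enclosing struct. Roughly:

/* rough expansion of __struct_group(create_context_hdr, hdr, __packed, ...) */
union {
	struct {
		__le32 Next;
		/* ... remaining header fields ... */
	} __packed;			/* anonymous: ctx->Next still works */
	struct create_context_hdr {
		__le32 Next;
		/* ... remaining header fields ... */
	} __packed hdr;			/* named: ctx->hdr.Next, and the tag
					 * is reusable on its own elsewhere */
} __packed;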
@@ -1222,7 +1225,7 @@ struct smb2_create_rsp {
} __packed;
struct create_posix {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[16];
__le32 Mode;
__u32 Reserved;
@@ -1230,7 +1233,7 @@ struct create_posix {
/* See MS-SMB2 2.2.13.2.3 and MS-SMB2 2.2.13.2.4 */
struct create_durable {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
union {
__u8 Reserved[16];
@@ -1243,14 +1246,14 @@ struct create_durable {
/* See MS-SMB2 2.2.13.2.5 */
struct create_mxac_req {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
__le64 Timestamp;
} __packed;
/* See MS-SMB2 2.2.14.2.5 */
struct create_mxac_rsp {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
__le32 QueryStatus;
__le32 MaximalAccess;
@@ -1286,13 +1289,13 @@ struct lease_context_v2 {
} __packed;
struct create_lease {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
struct lease_context lcontext;
} __packed;
struct create_lease_v2 {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
struct lease_context_v2 lcontext;
__u8 Pad[4];
@@ -1300,7 +1303,7 @@ struct create_lease_v2 {
/* See MS-SMB2 2.2.14.2.9 */
struct create_disk_id_rsp {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
__le64 DiskFileId;
__le64 VolumeId;
@@ -1309,7 +1312,7 @@ struct create_disk_id_rsp {
/* See MS-SMB2 2.2.13.2.13 */
struct create_app_inst_id {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[16];
__le32 StructureSize; /* Must be 20 */
__u16 Reserved;
@@ -1318,7 +1321,7 @@ struct create_app_inst_id {
/* See MS-SMB2 2.2.13.2.15 */
struct create_app_inst_id_vers {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[16];
__le32 StructureSize; /* Must be 24 */
__u16 Reserved;
diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h
index 8ca8a45c4c62..f4e55199938d 100644
--- a/fs/smb/server/ksmbd_netlink.h
+++ b/fs/smb/server/ksmbd_netlink.h
@@ -167,7 +167,8 @@ struct ksmbd_share_config_response {
__u16 force_uid;
__u16 force_gid;
__s8 share_name[KSMBD_REQ_MAX_SHARE_NAME];
- __u32 reserved[112]; /* Reserved room */
+ __u32 reserved[111]; /* Reserved room */
+ __u32 payload_sz;
__u32 veto_list_sz;
__s8 ____payload[];
};
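Shrinking `reserved[]` by exactly one `__u32` while adding `payload_sz` keeps the structure's size and every following field offset unchanged, which is what preserves the netlink ABI with `ksmbd.mountd`. A hypothetical compile-time guard for that invariant:

/* hypothetical guard; 112 __u32s of tail room existed before the change */
static_assert(sizeof_field(struct ksmbd_share_config_response, reserved) +
	      sizeof(__u32) == 112 * sizeof(__u32),
	      "ksmbd_share_config_response ABI changed size");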
@@ -339,23 +340,24 @@ enum KSMBD_TREE_CONN_STATUS {
/*
* Share config flags.
*/
-#define KSMBD_SHARE_FLAG_INVALID (0)
-#define KSMBD_SHARE_FLAG_AVAILABLE BIT(0)
-#define KSMBD_SHARE_FLAG_BROWSEABLE BIT(1)
-#define KSMBD_SHARE_FLAG_WRITEABLE BIT(2)
-#define KSMBD_SHARE_FLAG_READONLY BIT(3)
-#define KSMBD_SHARE_FLAG_GUEST_OK BIT(4)
-#define KSMBD_SHARE_FLAG_GUEST_ONLY BIT(5)
-#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS BIT(6)
-#define KSMBD_SHARE_FLAG_OPLOCKS BIT(7)
-#define KSMBD_SHARE_FLAG_PIPE BIT(8)
-#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES BIT(9)
-#define KSMBD_SHARE_FLAG_INHERIT_OWNER BIT(10)
-#define KSMBD_SHARE_FLAG_STREAMS BIT(11)
-#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12)
-#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13)
-#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
-#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15)
+#define KSMBD_SHARE_FLAG_INVALID (0)
+#define KSMBD_SHARE_FLAG_AVAILABLE BIT(0)
+#define KSMBD_SHARE_FLAG_BROWSEABLE BIT(1)
+#define KSMBD_SHARE_FLAG_WRITEABLE BIT(2)
+#define KSMBD_SHARE_FLAG_READONLY BIT(3)
+#define KSMBD_SHARE_FLAG_GUEST_OK BIT(4)
+#define KSMBD_SHARE_FLAG_GUEST_ONLY BIT(5)
+#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS BIT(6)
+#define KSMBD_SHARE_FLAG_OPLOCKS BIT(7)
+#define KSMBD_SHARE_FLAG_PIPE BIT(8)
+#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES BIT(9)
+#define KSMBD_SHARE_FLAG_INHERIT_OWNER BIT(10)
+#define KSMBD_SHARE_FLAG_STREAMS BIT(11)
+#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS BIT(12)
+#define KSMBD_SHARE_FLAG_ACL_XATTR BIT(13)
+#define KSMBD_SHARE_FLAG_UPDATE BIT(14)
+#define KSMBD_SHARE_FLAG_CROSSMNT BIT(15)
+#define KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY BIT(16)
/*
* Tree connect request flags.
diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
index 328a412259dc..a2f0a2edceb8 100644
--- a/fs/smb/server/mgmt/share_config.c
+++ b/fs/smb/server/mgmt/share_config.c
@@ -158,7 +158,12 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
share->name = kstrdup(name, GFP_KERNEL);
if (!test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
- share->path = kstrdup(ksmbd_share_config_path(resp),
+ int path_len = PATH_MAX;
+
+ if (resp->payload_sz)
+ path_len = resp->payload_sz - resp->veto_list_sz;
+
+ share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
GFP_KERNEL);
if (share->path)
share->path_sz = strlen(share->path);
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 4978edfb15f9..b9d9116fc2b3 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -207,9 +207,9 @@ static void opinfo_add(struct oplock_info *opinfo)
{
struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
- write_lock(&ci->m_lock);
+ down_write(&ci->m_lock);
list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
- write_unlock(&ci->m_lock);
+ up_write(&ci->m_lock);
}
static void opinfo_del(struct oplock_info *opinfo)
@@ -221,9 +221,9 @@ static void opinfo_del(struct oplock_info *opinfo)
lease_del_list(opinfo);
write_unlock(&lease_list_lock);
}
- write_lock(&ci->m_lock);
+ down_write(&ci->m_lock);
list_del_rcu(&opinfo->op_entry);
- write_unlock(&ci->m_lock);
+ up_write(&ci->m_lock);
}
static unsigned long opinfo_count(struct ksmbd_file *fp)
@@ -526,21 +526,18 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
* Compare lease key and client_guid to know request from same owner
* of same client
*/
- read_lock(&ci->m_lock);
+ down_read(&ci->m_lock);
list_for_each_entry(opinfo, &ci->m_op_list, op_entry) {
if (!opinfo->is_lease || !opinfo->conn)
continue;
- read_unlock(&ci->m_lock);
lease = opinfo->o_lease;
ret = compare_guid_key(opinfo, client_guid, lctx->lease_key);
if (ret) {
m_opinfo = opinfo;
/* skip upgrading lease about breaking lease */
- if (atomic_read(&opinfo->breaking_cnt)) {
- read_lock(&ci->m_lock);
+ if (atomic_read(&opinfo->breaking_cnt))
continue;
- }
/* upgrading lease */
if ((atomic_read(&ci->op_count) +
@@ -570,9 +567,8 @@ static struct oplock_info *same_client_has_lease(struct ksmbd_inode *ci,
lease_none_upgrade(opinfo, lctx->req_state);
}
}
- read_lock(&ci->m_lock);
}
- read_unlock(&ci->m_lock);
+ up_read(&ci->m_lock);
return m_opinfo;
}
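These conversions work because `m_lock` becomes a `struct rw_semaphore` later in the series (see the vfs_cache.c and vfs_cache.h hunks below): rwsem holders may sleep, so `oplock_break()` and friends can now run with the lock held, and the old drop-and-retake dance around the list walk disappears. The difference in a nutshell:

/* rwlock_t: atomic context, nothing that sleeps may run while held */
read_lock(&ci->m_lock);
/* ... lookups only ... */
read_unlock(&ci->m_lock);

/* rw_semaphore: the holder may block, so sleeping calls can stay inside */
down_read(&ci->m_lock);
/* ... may call oplock_break() or allocate with GFP_KERNEL ... */
up_read(&ci->m_lock);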
@@ -613,13 +609,23 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
if (opinfo->op_state == OPLOCK_CLOSING)
return -ENOENT;
- else if (!opinfo->is_lease && opinfo->level <= req_op_level)
- return 1;
+ else if (opinfo->level <= req_op_level) {
+ if (opinfo->is_lease &&
+ opinfo->o_lease->state !=
+ (SMB2_LEASE_HANDLE_CACHING_LE |
+ SMB2_LEASE_READ_CACHING_LE))
+ return 1;
+ }
}
- if (!opinfo->is_lease && opinfo->level <= req_op_level) {
- wake_up_oplock_break(opinfo);
- return 1;
+ if (opinfo->level <= req_op_level) {
+ if (opinfo->is_lease &&
+ opinfo->o_lease->state !=
+ (SMB2_LEASE_HANDLE_CACHING_LE |
+ SMB2_LEASE_READ_CACHING_LE)) {
+ wake_up_oplock_break(opinfo);
+ return 1;
+ }
}
return 0;
}
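The same three-part test (level already low enough, it is a lease, and the lease is not in the read-plus-handle caching state) now appears in both branches; a hypothetical helper would make the shared condition explicit:

/* hypothetical refactor of the condition repeated above */
static bool lease_break_satisfied(struct oplock_info *opinfo, int req_op_level)
{
	return opinfo->level <= req_op_level &&
	       opinfo->is_lease &&
	       opinfo->o_lease->state != (SMB2_LEASE_HANDLE_CACHING_LE |
					  SMB2_LEASE_READ_CACHING_LE);
}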
@@ -887,7 +893,6 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level)
struct lease *lease = brk_opinfo->o_lease;
atomic_inc(&brk_opinfo->breaking_cnt);
-
err = oplock_break_pending(brk_opinfo, req_op_level);
if (err)
return err < 0 ? err : 0;
@@ -1105,7 +1110,7 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
if (!p_ci)
return;
- read_lock(&p_ci->m_lock);
+ down_read(&p_ci->m_lock);
list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
if (opinfo->conn == NULL || !opinfo->is_lease)
continue;
@@ -1123,13 +1128,11 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp,
continue;
}
- read_unlock(&p_ci->m_lock);
oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
opinfo_conn_put(opinfo);
- read_lock(&p_ci->m_lock);
}
}
- read_unlock(&p_ci->m_lock);
+ up_read(&p_ci->m_lock);
ksmbd_inode_put(p_ci);
}
@@ -1150,7 +1153,7 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
if (!p_ci)
return;
- read_lock(&p_ci->m_lock);
+ down_read(&p_ci->m_lock);
list_for_each_entry(opinfo, &p_ci->m_op_list, op_entry) {
if (opinfo->conn == NULL || !opinfo->is_lease)
continue;
@@ -1164,13 +1167,11 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp)
atomic_dec(&opinfo->conn->r_count);
continue;
}
- read_unlock(&p_ci->m_lock);
oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE);
opinfo_conn_put(opinfo);
- read_lock(&p_ci->m_lock);
}
}
- read_unlock(&p_ci->m_lock);
+ up_read(&p_ci->m_lock);
ksmbd_inode_put(p_ci);
}
@@ -1200,7 +1201,9 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
/* Only v2 leases handle the directory */
if (S_ISDIR(file_inode(fp->filp)->i_mode)) {
- if (!lctx || lctx->version != 2)
+ if (!lctx || lctx->version != 2 ||
+ (lctx->flags != SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE &&
+ !lctx->epoch))
return 0;
}
@@ -1465,8 +1468,9 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
buf->lcontext.LeaseFlags = lease->flags;
buf->lcontext.Epoch = cpu_to_le16(lease->epoch);
buf->lcontext.LeaseState = lease->state;
- memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
- SMB2_LEASE_KEY_SIZE);
+ if (lease->flags == SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
+ memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+ SMB2_LEASE_KEY_SIZE);
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_lease_v2, lcontext));
buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
@@ -1525,8 +1529,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
lreq->flags = lc->lcontext.LeaseFlags;
lreq->epoch = lc->lcontext.Epoch;
lreq->duration = lc->lcontext.LeaseDuration;
- memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
- SMB2_LEASE_KEY_SIZE);
+ if (lreq->flags == SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
+ memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
+ SMB2_LEASE_KEY_SIZE);
lreq->version = 2;
} else {
struct create_lease *lc = (struct create_lease *)cc;
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index c0788188aa82..c67fbc8d6683 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -167,20 +167,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
int rc;
bool is_chained = false;
- if (conn->ops->allocate_rsp_buf(work))
- return;
-
if (conn->ops->is_transform_hdr &&
conn->ops->is_transform_hdr(work->request_buf)) {
rc = conn->ops->decrypt_req(work);
- if (rc < 0) {
- conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
- goto send;
- }
-
+ if (rc < 0)
+ return;
work->encrypted = true;
}
+ if (conn->ops->allocate_rsp_buf(work))
+ return;
+
rc = conn->ops->init_rsp_hdr(work);
if (rc) {
/* either uid or tid is not correct */
diff --git a/fs/smb/server/smb2ops.c b/fs/smb/server/smb2ops.c
index a45f7dca482e..606aa3c5189a 100644
--- a/fs/smb/server/smb2ops.c
+++ b/fs/smb/server/smb2ops.c
@@ -228,6 +228,11 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
+ (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
+ conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
+ conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
+
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
}
@@ -278,11 +283,6 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING |
SMB2_GLOBAL_CAP_DIRECTORY_LEASING;
- if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION ||
- (!(server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_ENCRYPTION_OFF) &&
- conn->cli_cap & SMB2_GLOBAL_CAP_ENCRYPTION))
- conn->vals->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
-
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_MULTI_CHANNEL;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index d478fa0c57ab..b6c5a8ea3887 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -535,6 +535,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
if (cmd == SMB2_QUERY_INFO_HE) {
struct smb2_query_info_req *req;
+ if (get_rfc1002_len(work->request_buf) <
+ offsetof(struct smb2_query_info_req, OutputBufferLength))
+ return -EINVAL;
+
req = smb2_get_msg(work->request_buf);
if ((req->InfoType == SMB2_O_INFO_FILE &&
(req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
@@ -1922,7 +1926,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
struct ksmbd_session *sess = work->sess;
char *treename = NULL, *name = NULL;
struct ksmbd_tree_conn_status status;
- struct ksmbd_share_config *share;
+ struct ksmbd_share_config *share = NULL;
int rc = -EINVAL;
WORK_BUFFERS(work, req, rsp);
@@ -1984,7 +1988,12 @@ int smb2_tree_connect(struct ksmbd_work *work)
write_unlock(&sess->tree_conns_lock);
rsp->StructureSize = cpu_to_le16(16);
out_err1:
- rsp->Capabilities = 0;
+ if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE && share &&
+ test_share_config_flag(share,
+ KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY))
+ rsp->Capabilities = SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY;
+ else
+ rsp->Capabilities = 0;
rsp->Reserved = 0;
/* default manual caching */
rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
@@ -3367,9 +3376,9 @@ int smb2_open(struct ksmbd_work *work)
* after daccess, saccess, attrib_only, and stream are
* initialized.
*/
- write_lock(&fp->f_ci->m_lock);
+ down_write(&fp->f_ci->m_lock);
list_add(&fp->node, &fp->f_ci->m_fp_list);
- write_unlock(&fp->f_ci->m_lock);
+ up_write(&fp->f_ci->m_lock);
/* Check delete pending among previous fp before oplock break */
if (ksmbd_inode_pending_delete(fp)) {
@@ -3498,7 +3507,9 @@ int smb2_open(struct ksmbd_work *work)
memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
if (dh_info.type == DURABLE_REQ_V2 || dh_info.type == DURABLE_REQ) {
- if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent)
+ if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent &&
+ test_share_config_flag(work->tcon->share_conf,
+ KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY))
fp->is_persistent = true;
else
fp->is_durable = true;
@@ -5857,8 +5868,9 @@ static int smb2_rename(struct ksmbd_work *work,
if (!file_info->ReplaceIfExists)
flags = RENAME_NOREPLACE;
- smb_break_all_levII_oplock(work, fp, 0);
rc = ksmbd_vfs_rename(work, &fp->filp->f_path, new_name, flags);
+ if (!rc)
+ smb_break_all_levII_oplock(work, fp, 0);
out:
kfree(new_name);
return rc;
diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
index bd1d2a0e9203..643f5e1cfe35 100644
--- a/fs/smb/server/smb2pdu.h
+++ b/fs/smb/server/smb2pdu.h
@@ -64,7 +64,7 @@ struct preauth_integrity_info {
#define SMB2_SESSION_TIMEOUT (10 * HZ)
struct create_durable_req_v2 {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
__le32 Timeout;
__le32 Flags;
@@ -73,7 +73,7 @@ struct create_durable_req_v2 {
} __packed;
struct create_durable_reconn_req {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
union {
__u8 Reserved[16];
@@ -85,7 +85,7 @@ struct create_durable_reconn_req {
} __packed;
struct create_durable_reconn_v2_req {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
struct {
__u64 PersistentFileId;
@@ -96,13 +96,13 @@ struct create_durable_reconn_v2_req {
} __packed;
struct create_alloc_size_req {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
__le64 AllocationSize;
} __packed;
struct create_durable_rsp {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
union {
__u8 Reserved[8];
@@ -114,7 +114,7 @@ struct create_durable_rsp {
/* Flags */
#define SMB2_DHANDLE_FLAG_PERSISTENT 0x00000002
struct create_durable_v2_rsp {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
__le32 Timeout;
__le32 Flags;
@@ -122,7 +122,7 @@ struct create_durable_v2_rsp {
/* equivalent of the contents of SMB3.1.1 POSIX open context response */
struct create_posix_rsp {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[16];
__le32 nlink;
__le32 reparse_tag;
@@ -381,13 +381,13 @@ struct smb2_ea_info {
} __packed; /* level 15 Query */
struct create_ea_buf_req {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
struct smb2_ea_info ea;
} __packed;
struct create_sd_buf_req {
- struct create_context ccontext;
+ struct create_context_hdr ccontext;
__u8 Name[8];
struct smb_ntsd ntsd;
} __packed;
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index fcaf373cc008..474dadf6b7b8 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -646,7 +646,7 @@ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
* Lookup fp in master fp list, and check desired access and
* shared mode between previous open and current open.
*/
- read_lock(&curr_fp->f_ci->m_lock);
+ down_read(&curr_fp->f_ci->m_lock);
list_for_each_entry(prev_fp, &curr_fp->f_ci->m_fp_list, node) {
if (file_inode(filp) != file_inode(prev_fp->filp))
continue;
@@ -722,7 +722,7 @@ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp)
break;
}
}
- read_unlock(&curr_fp->f_ci->m_lock);
+ up_read(&curr_fp->f_ci->m_lock);
return rc;
}
diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c
index f29bb03f0dc4..8752ac82c557 100644
--- a/fs/smb/server/transport_ipc.c
+++ b/fs/smb/server/transport_ipc.c
@@ -65,6 +65,7 @@ struct ipc_msg_table_entry {
struct hlist_node ipc_table_hlist;
void *response;
+ unsigned int msg_sz;
};
static struct delayed_work ipc_timer_work;
@@ -275,6 +276,7 @@ static int handle_response(int type, void *payload, size_t sz)
}
memcpy(entry->response, payload, sz);
+ entry->msg_sz = sz;
wake_up_interruptible(&entry->wait);
ret = 0;
break;
@@ -453,6 +455,34 @@ out:
return ret;
}
+static int ipc_validate_msg(struct ipc_msg_table_entry *entry)
+{
+ unsigned int msg_sz = entry->msg_sz;
+
+ if (entry->type == KSMBD_EVENT_RPC_REQUEST) {
+ struct ksmbd_rpc_command *resp = entry->response;
+
+ msg_sz = sizeof(struct ksmbd_rpc_command) + resp->payload_sz;
+ } else if (entry->type == KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST) {
+ struct ksmbd_spnego_authen_response *resp = entry->response;
+
+ msg_sz = sizeof(struct ksmbd_spnego_authen_response) +
+ resp->session_key_len + resp->spnego_blob_len;
+ } else if (entry->type == KSMBD_EVENT_SHARE_CONFIG_REQUEST) {
+ struct ksmbd_share_config_response *resp = entry->response;
+
+ if (resp->payload_sz) {
+ if (resp->payload_sz < resp->veto_list_sz)
+ return -EINVAL;
+
+ msg_sz = sizeof(struct ksmbd_share_config_response) +
+ resp->payload_sz;
+ }
+ }
+
+ return entry->msg_sz != msg_sz ? -EINVAL : 0;
+}
+
static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle)
{
struct ipc_msg_table_entry entry;
@@ -477,6 +507,13 @@ static void *ipc_msg_send_request(struct ksmbd_ipc_msg *msg, unsigned int handle
ret = wait_event_interruptible_timeout(entry.wait,
entry.response != NULL,
IPC_WAIT_TIMEOUT);
+ if (entry.response) {
+ ret = ipc_validate_msg(&entry);
+ if (ret) {
+ kvfree(entry.response);
+ entry.response = NULL;
+ }
+ }
out:
down_write(&ipc_msg_table_lock);
hash_del(&entry.ipc_table_hlist);
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index 002a3f0dc7c5..6633fa78e9b9 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -448,6 +448,10 @@ static int create_socket(struct interface *iface)
sin6.sin6_family = PF_INET6;
sin6.sin6_addr = in6addr_any;
sin6.sin6_port = htons(server_conf.tcp_port);
+
+ lock_sock(ksmbd_socket->sk);
+ ksmbd_socket->sk->sk_ipv6only = false;
+ release_sock(ksmbd_socket->sk);
}
ksmbd_tcp_nodelay(ksmbd_socket);
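Clearing `sk_ipv6only` under the socket lock makes the single IPv6 listener also accept IPv4 clients as v4-mapped addresses, rather than depending on the `net.ipv6.bindv6only` sysctl default. The userspace analogue of the same setting:

/* userspace analogue: an AF_INET6 listener that also serves IPv4 */
int off = 0;

if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off)) < 0)
	perror("setsockopt(IPV6_V6ONLY)");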
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 22f0f3db3ac9..51b1b0bed616 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -754,10 +754,15 @@ retry:
goto out4;
}
+ /*
+ * explicitly handle file overwrite case, for compatibility with
+	 * filesystems that may not support rename flags (e.g. fuse)
+ */
if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
err = -EEXIST;
goto out4;
}
+ flags &= ~(RENAME_NOREPLACE);
if (old_child == trap) {
err = -EINVAL;
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 030f70700036..6cb599cd287e 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -165,7 +165,7 @@ static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
ci->m_fattr = 0;
INIT_LIST_HEAD(&ci->m_fp_list);
INIT_LIST_HEAD(&ci->m_op_list);
- rwlock_init(&ci->m_lock);
+ init_rwsem(&ci->m_lock);
ci->m_de = fp->filp->f_path.dentry;
return 0;
}
@@ -261,14 +261,14 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
}
if (atomic_dec_and_test(&ci->m_count)) {
- write_lock(&ci->m_lock);
+ down_write(&ci->m_lock);
if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
- write_unlock(&ci->m_lock);
+ up_write(&ci->m_lock);
ksmbd_vfs_unlink(filp);
- write_lock(&ci->m_lock);
+ down_write(&ci->m_lock);
}
- write_unlock(&ci->m_lock);
+ up_write(&ci->m_lock);
ksmbd_inode_free(ci);
}
@@ -289,9 +289,9 @@ static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp
if (!has_file_id(fp->volatile_id))
return;
- write_lock(&fp->f_ci->m_lock);
+ down_write(&fp->f_ci->m_lock);
list_del_init(&fp->node);
- write_unlock(&fp->f_ci->m_lock);
+ up_write(&fp->f_ci->m_lock);
write_lock(&ft->lock);
idr_remove(ft->idr, fp->volatile_id);
@@ -523,17 +523,17 @@ struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
if (!ci)
return NULL;
- read_lock(&ci->m_lock);
+ down_read(&ci->m_lock);
list_for_each_entry(lfp, &ci->m_fp_list, node) {
if (inode == file_inode(lfp->filp)) {
atomic_dec(&ci->m_count);
lfp = ksmbd_fp_get(lfp);
- read_unlock(&ci->m_lock);
+ up_read(&ci->m_lock);
return lfp;
}
}
atomic_dec(&ci->m_count);
- read_unlock(&ci->m_lock);
+ up_read(&ci->m_lock);
return NULL;
}
@@ -705,13 +705,13 @@ static bool session_fd_check(struct ksmbd_tree_connect *tcon,
conn = fp->conn;
ci = fp->f_ci;
- write_lock(&ci->m_lock);
+ down_write(&ci->m_lock);
list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
if (op->conn != conn)
continue;
op->conn = NULL;
}
- write_unlock(&ci->m_lock);
+ up_write(&ci->m_lock);
fp->conn = NULL;
fp->tcon = NULL;
@@ -801,13 +801,13 @@ int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
fp->tcon = work->tcon;
ci = fp->f_ci;
- write_lock(&ci->m_lock);
+ down_write(&ci->m_lock);
list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
if (op->conn)
continue;
op->conn = fp->conn;
}
- write_unlock(&ci->m_lock);
+ up_write(&ci->m_lock);
__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
if (!has_file_id(fp->volatile_id)) {
diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
index ed44fb4e18e7..5a225e7055f1 100644
--- a/fs/smb/server/vfs_cache.h
+++ b/fs/smb/server/vfs_cache.h
@@ -47,7 +47,7 @@ struct stream {
};
struct ksmbd_inode {
- rwlock_t m_lock;
+ struct rw_semaphore m_lock;
atomic_t m_count;
atomic_t op_count;
/* opinfo count for streams */
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e8df6430444b..a8c1e7f9a609 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -375,8 +375,6 @@ void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer,
flush_dcache_page(page);
if (copied == avail)
SetPageUptodate(page);
- else
- SetPageError(page);
}
/* Copy data into page cache */
@@ -471,7 +469,7 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
res = read_blocklist(inode, index, &block);
if (res < 0)
- goto error_out;
+ goto out;
if (res == 0)
res = squashfs_readpage_sparse(page, expected);
@@ -483,8 +481,6 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
if (!res)
return 0;
-error_out:
- SetPageError(page);
out:
pageaddr = kmap_atomic(page);
memset(pageaddr, 0, PAGE_SIZE);
diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c
index 763a3f7a75f6..2a689ce71de9 100644
--- a/fs/squashfs/file_direct.c
+++ b/fs/squashfs/file_direct.c
@@ -106,14 +106,13 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
return 0;
mark_errored:
- /* Decompression failed, mark pages as errored. Target_page is
+ /* Decompression failed. Target_page is
* dealt with by the caller
*/
for (i = 0; i < pages; i++) {
if (page[i] == NULL || page[i] == target_page)
continue;
flush_dcache_page(page[i]);
- SetPageError(page[i]);
unlock_page(page[i]);
put_page(page[i]);
}
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index aa3411354e66..16bd693d0b3a 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -48,6 +48,10 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
gid_t i_gid;
int err;
+ inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
+ if (inode->i_ino == 0)
+ return -EINVAL;
+
err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid);
if (err)
return err;
@@ -58,7 +62,6 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode,
i_uid_write(inode, i_uid);
i_gid_write(inode, i_gid);
- inode->i_ino = le32_to_cpu(sqsh_ino->inode_number);
inode_set_mtime(inode, le32_to_cpu(sqsh_ino->mtime), 0);
inode_set_atime(inode, inode_get_mtime_sec(inode), 0);
inode_set_ctime(inode, inode_get_mtime_sec(inode), 0);
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index 11e4539b9eae..65aae7e2a859 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -62,27 +62,21 @@
*/
static int get_dir_index_using_name(struct super_block *sb,
u64 *next_block, int *next_offset, u64 index_start,
- int index_offset, int i_count, const char *name,
- int len)
+ int index_offset, int i_count, const char *name)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
int i, length = 0, err;
unsigned int size;
struct squashfs_dir_index *index;
- char *str;
TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
- index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL);
+ index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
if (index == NULL) {
ERROR("Failed to allocate squashfs_dir_index\n");
goto out;
}
- str = &index->name[SQUASHFS_NAME_LEN + 1];
- strncpy(str, name, len);
- str[len] = '\0';
-
for (i = 0; i < i_count; i++) {
err = squashfs_read_metadata(sb, index, &index_start,
&index_offset, sizeof(*index));
@@ -101,7 +95,7 @@ static int get_dir_index_using_name(struct super_block *sb,
index->name[size] = '\0';
- if (strcmp(index->name, str) > 0)
+ if (strcmp(index->name, name) > 0)
break;
length = le32_to_cpu(index->index);
@@ -153,7 +147,7 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
length = get_dir_index_using_name(dir->i_sb, &block, &offset,
squashfs_i(dir)->dir_idx_start,
squashfs_i(dir)->dir_idx_offset,
- squashfs_i(dir)->dir_idx_cnt, name, len);
+ squashfs_i(dir)->dir_idx_cnt, name);
while (length < i_size_read(dir)) {
/*
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 2bf977a52c2c..6ef735bd841a 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -32,20 +32,19 @@
static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
{
- struct page *page = &folio->page;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct squashfs_sb_info *msblk = sb->s_fs_info;
- int index = page->index << PAGE_SHIFT;
+ int index = folio_pos(folio);
u64 block = squashfs_i(inode)->start;
int offset = squashfs_i(inode)->offset;
int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
- int bytes, copied;
+ int bytes, copied, error;
void *pageaddr;
struct squashfs_cache_entry *entry;
TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
- "%llx, offset %x\n", page->index, block, offset);
+ "%llx, offset %x\n", folio->index, block, offset);
/*
* Skip index bytes into symlink metadata.
@@ -57,14 +56,15 @@ static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
ERROR("Unable to read symlink [%llx:%x]\n",
squashfs_i(inode)->start,
squashfs_i(inode)->offset);
- goto error_out;
+ error = bytes;
+ goto out;
}
}
/*
* Read length bytes from symlink metadata. Squashfs_read_metadata
* is not used here because it can sleep and we want to use
- * kmap_atomic to map the page. Instead call the underlying
+ * kmap_local to map the folio. Instead call the underlying
* squashfs_cache_get routine. As length bytes may overlap metadata
* blocks, we may need to call squashfs_cache_get multiple times.
*/
@@ -75,29 +75,26 @@ static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
squashfs_i(inode)->start,
squashfs_i(inode)->offset);
+			error = entry->error;
			squashfs_cache_put(entry);
-			goto error_out;
+			goto out;
}
- pageaddr = kmap_atomic(page);
+ pageaddr = kmap_local_folio(folio, 0);
copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
length - bytes);
if (copied == length - bytes)
memset(pageaddr + length, 0, PAGE_SIZE - length);
else
block = entry->next_index;
- kunmap_atomic(pageaddr);
+ kunmap_local(pageaddr);
squashfs_cache_put(entry);
}
- flush_dcache_page(page);
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
-
-error_out:
- SetPageError(page);
- unlock_page(page);
- return 0;
+ flush_dcache_folio(folio);
+ error = 0;
+out:
+ folio_end_read(folio, error == 0);
+ return error;
}
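`folio_end_read()` replaces the old flush/SetPageUptodate/unlock tail (and the `SetPageError()` path, which is being removed tree-wide): it marks the folio uptodate only on success and unlocks it in a single call. Conceptually:

/* roughly what folio_end_read(folio, success) does */
if (success)
	folio_mark_uptodate(folio);
folio_unlock(folio);	/* wakes anyone waiting on the folio lock */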
diff --git a/fs/stat.c b/fs/stat.c
index 77cdc69eb422..70bd3e888cfa 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -658,6 +658,7 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer)
tmp.stx_mnt_id = stat->mnt_id;
tmp.stx_dio_mem_align = stat->dio_mem_align;
tmp.stx_dio_offset_align = stat->dio_offset_align;
+ tmp.stx_subvol = stat->subvol;
return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
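With `stx_subvol` copied out, filesystems that set `STATX_SUBVOL` in `stx_mask` can expose a per-file subvolume ID to userspace. A sketch of a caller, assuming headers new enough to define `STATX_SUBVOL` and a path that actually lives on a subvolume-aware filesystem:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	if (statx(AT_FDCWD, "/mnt/btrfs/file", 0, STATX_SUBVOL, &stx) == 0 &&
	    (stx.stx_mask & STATX_SUBVOL))
		printf("subvolume id: %llu\n",
		       (unsigned long long)stx.stx_subvol);
	return 0;
}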
diff --git a/fs/super.c b/fs/super.c
index 81656c7807db..b72f1d288e95 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1516,29 +1516,11 @@ static int fs_bdev_thaw(struct block_device *bdev)
return error;
}
-static void fs_bdev_super_get(void *data)
-{
- struct super_block *sb = data;
-
- spin_lock(&sb_lock);
- sb->s_count++;
- spin_unlock(&sb_lock);
-}
-
-static void fs_bdev_super_put(void *data)
-{
- struct super_block *sb = data;
-
- put_super(sb);
-}
-
const struct blk_holder_ops fs_holder_ops = {
.mark_dead = fs_bdev_mark_dead,
.sync = fs_bdev_sync,
.freeze = fs_bdev_freeze,
.thaw = fs_bdev_thaw,
- .get_holder = fs_bdev_super_get,
- .put_holder = fs_bdev_super_put,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);
@@ -1563,7 +1545,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
* writable from userspace even for a read-only block device.
*/
if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
- fput(bdev_file);
+ bdev_fput(bdev_file);
return -EACCES;
}
@@ -1574,7 +1556,7 @@ int setup_bdev_super(struct super_block *sb, int sb_flags,
if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
if (fc)
warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
- fput(bdev_file);
+ bdev_fput(bdev_file);
return -EBUSY;
}
spin_lock(&sb_lock);
@@ -1694,7 +1676,7 @@ void kill_block_super(struct super_block *sb)
generic_shutdown_super(sb);
if (bdev) {
sync_blockdev(bdev);
- fput(sb->s_bdev_file);
+ bdev_fput(sb->s_bdev_file);
}
}
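With the `get_holder`/`put_holder` callbacks gone, the superblock no longer pins itself through the block layer; instead, files obtained from `bdev_file_open_by_dev()`/`bdev_file_open_by_path()` must be released with `bdev_fput()`, which drops the holder claim in addition to the file reference. The pairing, as a sketch:

	struct file *bdev_file;

	bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE,
					   sb, &fs_holder_ops);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);
	/* ... use file_bdev(bdev_file) ... */
	bdev_fput(bdev_file);	/* not plain fput(): also releases the holder */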
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 6b7652fb8050..7cd64021d453 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -463,6 +463,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
kn = kernfs_find_and_get(kobj->sd, attr->name);
if (kn)
kernfs_break_active_protection(kn);
+ else
+ kobject_put(kobj);
return kn;
}
EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
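`sysfs_break_active_protection()` takes a reference on `@kobj` on entry so that the later `sysfs_unbreak_active_protection()` can safely drop it; when the kernfs lookup fails, that reference used to leak, hence the added `kobject_put()`. The intended pairing (the attribute and teardown step are illustrative):

	struct kernfs_node *kn;

	kn = sysfs_break_active_protection(kobj, &attr->attr);
	if (kn) {
		/* ... tear the object down without deadlocking on this
		 * attribute's active reference ... */
		sysfs_unbreak_active_protection(kn);	/* also puts kobj */
	}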
diff --git a/fs/timerfd.c b/fs/timerfd.c
index e9c96a0c79f1..4bf2f8bfec11 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -262,17 +262,18 @@ static __poll_t timerfd_poll(struct file *file, poll_table *wait)
return events;
}
-static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t timerfd_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct timerfd_ctx *ctx = file->private_data;
ssize_t res;
u64 ticks = 0;
- if (count < sizeof(ticks))
+ if (iov_iter_count(to) < sizeof(ticks))
return -EINVAL;
+
spin_lock_irq(&ctx->wqh.lock);
- if (file->f_flags & O_NONBLOCK)
+ if (file->f_flags & O_NONBLOCK || iocb->ki_flags & IOCB_NOWAIT)
res = -EAGAIN;
else
res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks);
@@ -312,8 +313,11 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
ctx->ticks = 0;
}
spin_unlock_irq(&ctx->wqh.lock);
- if (ticks)
- res = put_user(ticks, (u64 __user *) buf) ? -EFAULT: sizeof(ticks);
+ if (ticks) {
+ res = copy_to_iter(&ticks, sizeof(ticks), to);
+ if (!res)
+ res = -EFAULT;
+ }
return res;
}
@@ -384,7 +388,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
static const struct file_operations timerfd_fops = {
.release = timerfd_release,
.poll = timerfd_poll,
- .read = timerfd_read,
+ .read_iter = timerfd_read_iter,
.llseek = noop_llseek,
.show_fdinfo = timerfd_show,
.unlocked_ioctl = timerfd_ioctl,
@@ -407,6 +411,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
{
int ufd;
struct timerfd_ctx *ctx;
+ struct file *file;
/* Check the TFD_* constants for consistency. */
BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
@@ -443,11 +448,22 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
ctx->moffs = ktime_mono_to_real(0);
- ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
- O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
- if (ufd < 0)
+ ufd = get_unused_fd_flags(flags & TFD_SHARED_FCNTL_FLAGS);
+ if (ufd < 0) {
+ kfree(ctx);
+ return ufd;
+ }
+
+ file = anon_inode_getfile("[timerfd]", &timerfd_fops, ctx,
+ O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
+ if (IS_ERR(file)) {
+ put_unused_fd(ufd);
kfree(ctx);
+ return PTR_ERR(file);
+ }
+ file->f_mode |= FMODE_NOWAIT;
+ fd_install(ufd, file);
return ufd;
}
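`anon_inode_getfd()` allocates the file and publishes the descriptor in one step, leaving no window to adjust the `struct file`. Splitting it into `get_unused_fd_flags()` + `anon_inode_getfile()` + `fd_install()` lets the code set `FMODE_NOWAIT` (needed for the `IOCB_NOWAIT` handling in `timerfd_read_iter()` above) before the fd becomes reachable. The pattern in general (`example_fops` and `priv` are placeholders):

	int fd = get_unused_fd_flags(O_CLOEXEC);
	struct file *file;

	if (fd < 0)
		return fd;
	file = anon_inode_getfile("[example]", &example_fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	file->f_mode |= FMODE_NOWAIT;	/* tweak before anyone can see it */
	fd_install(fd, file);		/* publication point */
	return fd;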
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index dc067eeb6387..0256afdd4acf 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -37,6 +37,7 @@ static DEFINE_MUTEX(eventfs_mutex);
struct eventfs_root_inode {
struct eventfs_inode ei;
+ struct inode *parent_inode;
struct dentry *events_dir;
};
@@ -68,11 +69,25 @@ enum {
EVENTFS_SAVE_MODE = BIT(16),
EVENTFS_SAVE_UID = BIT(17),
EVENTFS_SAVE_GID = BIT(18),
- EVENTFS_TOPLEVEL = BIT(19),
};
#define EVENTFS_MODE_MASK (EVENTFS_SAVE_MODE - 1)
+static void free_ei_rcu(struct rcu_head *rcu)
+{
+ struct eventfs_inode *ei = container_of(rcu, struct eventfs_inode, rcu);
+ struct eventfs_root_inode *rei;
+
+ kfree(ei->entry_attrs);
+ kfree_const(ei->name);
+ if (ei->is_events) {
+ rei = get_root_inode(ei);
+ kfree(rei);
+ } else {
+ kfree(ei);
+ }
+}
+
/*
* eventfs_inode reference count management.
*
@@ -84,18 +99,17 @@ enum {
static void release_ei(struct kref *ref)
{
struct eventfs_inode *ei = container_of(ref, struct eventfs_inode, kref);
- struct eventfs_root_inode *rei;
+ const struct eventfs_entry *entry;
WARN_ON_ONCE(!ei->is_freed);
- kfree(ei->entry_attrs);
- kfree_const(ei->name);
- if (ei->is_events) {
- rei = get_root_inode(ei);
- kfree_rcu(rei, ei.rcu);
- } else {
- kfree_rcu(ei, rcu);
+ for (int i = 0; i < ei->nr_entries; i++) {
+ entry = &ei->entries[i];
+ if (entry->release)
+ entry->release(entry->name, ei->data);
}
+
+ call_rcu(&ei->rcu, free_ei_rcu);
}
static inline void put_ei(struct eventfs_inode *ei)
@@ -112,6 +126,18 @@ static inline void free_ei(struct eventfs_inode *ei)
}
}
+/*
+ * Called when creation of an ei fails, do not call release() functions.
+ */
+static inline void cleanup_ei(struct eventfs_inode *ei)
+{
+ if (ei) {
+ /* Set nr_entries to 0 to prevent release() function being called */
+ ei->nr_entries = 0;
+ free_ei(ei);
+ }
+}
+
static inline struct eventfs_inode *get_ei(struct eventfs_inode *ei)
{
if (ei)
@@ -181,21 +207,7 @@ static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
* determined by the parent directory.
*/
if (dentry->d_inode->i_mode & S_IFDIR) {
- /*
- * The events directory dentry is never freed, unless its
- * part of an instance that is deleted. It's attr is the
- * default for its child files and directories.
- * Do not update it. It's not used for its own mode or ownership.
- */
- if (ei->is_events) {
- /* But it still needs to know if it was modified */
- if (iattr->ia_valid & ATTR_UID)
- ei->attr.mode |= EVENTFS_SAVE_UID;
- if (iattr->ia_valid & ATTR_GID)
- ei->attr.mode |= EVENTFS_SAVE_GID;
- } else {
- update_attr(&ei->attr, iattr);
- }
+ update_attr(&ei->attr, iattr);
} else {
name = dentry->d_name.name;
@@ -213,18 +225,25 @@ static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry,
return ret;
}
-static void update_top_events_attr(struct eventfs_inode *ei, struct super_block *sb)
+static void update_events_attr(struct eventfs_inode *ei, struct super_block *sb)
{
- struct inode *root;
+ struct eventfs_root_inode *rei;
+ struct inode *parent;
- /* Only update if the "events" was on the top level */
- if (!ei || !(ei->attr.mode & EVENTFS_TOPLEVEL))
- return;
+ rei = get_root_inode(ei);
+
+ /* Use the parent inode permissions unless root set its permissions */
+ parent = rei->parent_inode;
- /* Get the tracefs root inode. */
- root = d_inode(sb->s_root);
- ei->attr.uid = root->i_uid;
- ei->attr.gid = root->i_gid;
+ if (rei->ei.attr.mode & EVENTFS_SAVE_UID)
+ ei->attr.uid = rei->ei.attr.uid;
+ else
+ ei->attr.uid = parent->i_uid;
+
+ if (rei->ei.attr.mode & EVENTFS_SAVE_GID)
+ ei->attr.gid = rei->ei.attr.gid;
+ else
+ ei->attr.gid = parent->i_gid;
}
static void set_top_events_ownership(struct inode *inode)
@@ -233,10 +252,10 @@ static void set_top_events_ownership(struct inode *inode)
struct eventfs_inode *ei = ti->private;
/* The top events directory doesn't get automatically updated */
- if (!ei || !ei->is_events || !(ei->attr.mode & EVENTFS_TOPLEVEL))
+ if (!ei || !ei->is_events)
return;
- update_top_events_attr(ei, inode->i_sb);
+ update_events_attr(ei, inode->i_sb);
if (!(ei->attr.mode & EVENTFS_SAVE_UID))
inode->i_uid = ei->attr.uid;
@@ -265,7 +284,7 @@ static int eventfs_permission(struct mnt_idmap *idmap,
return generic_permission(idmap, inode, mask);
}
-static const struct inode_operations eventfs_root_dir_inode_operations = {
+static const struct inode_operations eventfs_dir_inode_operations = {
.lookup = eventfs_root_lookup,
.setattr = eventfs_set_attr,
.getattr = eventfs_get_attr,
@@ -282,6 +301,35 @@ static const struct file_operations eventfs_file_operations = {
.llseek = generic_file_llseek,
};
+/*
+ * On a remount of tracefs, if UID or GID options are set, then
+ * the mount point inode permissions should be used.
+ * Reset the saved permission flags appropriately.
+ */
+void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
+{
+ struct eventfs_inode *ei = ti->private;
+
+ if (!ei)
+ return;
+
+ if (update_uid)
+ ei->attr.mode &= ~EVENTFS_SAVE_UID;
+
+ if (update_gid)
+ ei->attr.mode &= ~EVENTFS_SAVE_GID;
+
+ if (!ei->entry_attrs)
+ return;
+
+ for (int i = 0; i < ei->nr_entries; i++) {
+ if (update_uid)
+ ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_UID;
+ if (update_gid)
+ ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_GID;
+ }
+}
+
/* Return the eventfs_inode of the "events" directory */
static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
{
@@ -297,14 +345,13 @@ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
* If the ei is being freed, the ownership of the children
* doesn't matter.
*/
- if (ei->is_freed) {
- ei = NULL;
- break;
- }
+ if (ei->is_freed)
+ return NULL;
+
// Walk upwards until you find the events inode
} while (!ei->is_events);
- update_top_events_attr(ei, dentry->d_sb);
+ update_events_attr(ei, dentry->d_sb);
return ei;
}
@@ -336,6 +383,7 @@ static void update_inode_attr(struct dentry *dentry, struct inode *inode,
/**
* lookup_file - look up a file in the tracefs filesystem
+ * @parent_ei: Pointer to the eventfs_inode that represents the parent of the file
* @dentry: the dentry to look up
* @mode: the permission that the file should have.
* @attr: saved attributes changed by user
@@ -389,6 +437,7 @@ static struct dentry *lookup_file(struct eventfs_inode *parent_ei,
/**
* lookup_dir_entry - look up a dir in the tracefs filesystem
* @dentry: the directory to look up
+ * @pei: Pointer to the parent eventfs_inode if available
* @ei: the eventfs_inode that represents the directory to create
*
* This function will look up a dentry for a directory represented by
@@ -408,7 +457,7 @@ static struct dentry *lookup_dir_entry(struct dentry *dentry,
update_inode_attr(dentry, inode, &ei->attr,
S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO);
- inode->i_op = &eventfs_root_dir_inode_operations;
+ inode->i_op = &eventfs_dir_inode_operations;
inode->i_fop = &eventfs_file_operations;
/* All directories will have the same inode number */
@@ -478,16 +527,20 @@ void eventfs_d_release(struct dentry *dentry)
/**
* lookup_file_dentry - create a dentry for a file of an eventfs_inode
+ * @dentry: The parent dentry under which the new file's dentry will be created
* @ei: the eventfs_inode that the file will be created under
* @idx: the index into the entry_attrs[] of the @ei
- * @parent: The parent dentry of the created file.
- * @name: The name of the file to create
* @mode: The mode of the file.
* @data: The data to use to set the inode of the file with on open()
* @fops: The fops of the file to be created.
*
- * Create a dentry for a file of an eventfs_inode @ei and place it into the
- * address located at @e_dentry.
+ * This function creates a dentry for a file associated with an
+ * eventfs_inode @ei. It uses the entry attributes specified by @idx,
+ * if available. The file will have the specified @mode and its inode will be
+ * set up with @data upon open. The file operations will be set to @fops.
+ *
+ * Return: Returns a pointer to the newly created file's dentry or an error
+ * pointer.
*/
static struct dentry *
lookup_file_dentry(struct dentry *dentry,
@@ -728,7 +781,7 @@ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode
/* Was the parent freed? */
if (list_empty(&ei->list)) {
- free_ei(ei);
+ cleanup_ei(ei);
ei = NULL;
}
return ei;
@@ -775,6 +828,7 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
// Note: we have a ref to the dentry from tracefs_start_creating()
rei = get_root_inode(ei);
rei->events_dir = dentry;
+ rei->parent_inode = d_inode(dentry->d_sb->s_root);
ei->entries = entries;
ei->nr_entries = size;
@@ -784,29 +838,26 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
uid = d_inode(dentry->d_parent)->i_uid;
gid = d_inode(dentry->d_parent)->i_gid;
- /*
- * If the events directory is of the top instance, then parent
- * is NULL. Set the attr.mode to reflect this and its permissions will
- * default to the tracefs root dentry.
- */
- if (!parent)
- ei->attr.mode = EVENTFS_TOPLEVEL;
-
- /* This is used as the default ownership of the files and directories */
ei->attr.uid = uid;
ei->attr.gid = gid;
+ /*
+ * When the "events" directory is created, it takes on the
+ * permissions of its parent. But can be reset on remount.
+ */
+ ei->attr.mode |= EVENTFS_SAVE_UID | EVENTFS_SAVE_GID;
+
INIT_LIST_HEAD(&ei->children);
INIT_LIST_HEAD(&ei->list);
ti = get_tracefs(inode);
- ti->flags |= TRACEFS_EVENT_INODE | TRACEFS_EVENT_TOP_INODE;
+ ti->flags |= TRACEFS_EVENT_INODE;
ti->private = ei;
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
inode->i_uid = uid;
inode->i_gid = gid;
- inode->i_op = &eventfs_root_dir_inode_operations;
+ inode->i_op = &eventfs_dir_inode_operations;
inode->i_fop = &eventfs_file_operations;
dentry->d_fsdata = get_ei(ei);
@@ -829,7 +880,7 @@ struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry
return ei;
fail:
- free_ei(ei);
+ cleanup_ei(ei);
tracefs_failed_creating(dentry);
return ERR_PTR(-ENOMEM);
}
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 5545e6bf7d26..a827f6a716c4 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -11,14 +11,14 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/mount.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/kobject.h>
#include <linux/namei.h>
#include <linux/tracefs.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/seq_file.h>
-#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include "internal.h"
@@ -30,20 +30,47 @@ static struct vfsmount *tracefs_mount;
static int tracefs_mount_count;
static bool tracefs_registered;
+/*
+ * Keep track of all tracefs_inodes in order to update their
+ * flags if necessary on a remount.
+ */
+static DEFINE_SPINLOCK(tracefs_inode_lock);
+static LIST_HEAD(tracefs_inodes);
+
static struct inode *tracefs_alloc_inode(struct super_block *sb)
{
struct tracefs_inode *ti;
+ unsigned long flags;
ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL);
if (!ti)
return NULL;
+ spin_lock_irqsave(&tracefs_inode_lock, flags);
+ list_add_rcu(&ti->list, &tracefs_inodes);
+ spin_unlock_irqrestore(&tracefs_inode_lock, flags);
+
return &ti->vfs_inode;
}
+static void tracefs_free_inode_rcu(struct rcu_head *rcu)
+{
+ struct tracefs_inode *ti;
+
+ ti = container_of(rcu, struct tracefs_inode, rcu);
+ kmem_cache_free(tracefs_inode_cachep, ti);
+}
+
static void tracefs_free_inode(struct inode *inode)
{
- kmem_cache_free(tracefs_inode_cachep, get_tracefs(inode));
+ struct tracefs_inode *ti = get_tracefs(inode);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tracefs_inode_lock, flags);
+ list_del_rcu(&ti->list);
+ spin_unlock_irqrestore(&tracefs_inode_lock, flags);
+
+ call_rcu(&ti->rcu, tracefs_free_inode_rcu);
}
static ssize_t default_read_file(struct file *file, char __user *buf,
@@ -153,16 +180,39 @@ static void set_tracefs_inode_owner(struct inode *inode)
{
struct tracefs_inode *ti = get_tracefs(inode);
struct inode *root_inode = ti->private;
+ kuid_t uid;
+ kgid_t gid;
+
+ uid = root_inode->i_uid;
+ gid = root_inode->i_gid;
+
+ /*
+ * If the root is not the mount point, then check the root's
+ * permissions. If it was never set, then default to the
+ * mount point.
+ */
+ if (root_inode != d_inode(root_inode->i_sb->s_root)) {
+ struct tracefs_inode *rti;
+
+ rti = get_tracefs(root_inode);
+ root_inode = d_inode(root_inode->i_sb->s_root);
+
+ if (!(rti->flags & TRACEFS_UID_PERM_SET))
+ uid = root_inode->i_uid;
+
+ if (!(rti->flags & TRACEFS_GID_PERM_SET))
+ gid = root_inode->i_gid;
+ }
/*
* If this inode has never been referenced, then update
* the permissions to the superblock.
*/
if (!(ti->flags & TRACEFS_UID_PERM_SET))
- inode->i_uid = root_inode->i_uid;
+ inode->i_uid = uid;
if (!(ti->flags & TRACEFS_GID_PERM_SET))
- inode->i_gid = root_inode->i_gid;
+ inode->i_gid = gid;
}
static int tracefs_permission(struct mnt_idmap *idmap,
@@ -231,7 +281,7 @@ struct inode *tracefs_get_inode(struct super_block *sb)
return inode;
}
-struct tracefs_mount_opts {
+struct tracefs_fs_info {
kuid_t uid;
kgid_t gid;
umode_t mode;
@@ -243,68 +293,51 @@ enum {
Opt_uid,
Opt_gid,
Opt_mode,
- Opt_err
};
-static const match_table_t tokens = {
- {Opt_uid, "uid=%u"},
- {Opt_gid, "gid=%u"},
- {Opt_mode, "mode=%o"},
- {Opt_err, NULL}
+static const struct fs_parameter_spec tracefs_param_specs[] = {
+ fsparam_u32 ("gid", Opt_gid),
+ fsparam_u32oct ("mode", Opt_mode),
+ fsparam_u32 ("uid", Opt_uid),
+ {}
};
-struct tracefs_fs_info {
- struct tracefs_mount_opts mount_opts;
-};
-
-static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
+static int tracefs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
- substring_t args[MAX_OPT_ARGS];
- int option;
- int token;
+ struct tracefs_fs_info *opts = fc->s_fs_info;
+ struct fs_parse_result result;
kuid_t uid;
kgid_t gid;
- char *p;
-
- opts->opts = 0;
- opts->mode = TRACEFS_DEFAULT_MODE;
-
- while ((p = strsep(&data, ",")) != NULL) {
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_uid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid))
- return -EINVAL;
- opts->uid = uid;
- break;
- case Opt_gid:
- if (match_int(&args[0], &option))
- return -EINVAL;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid))
- return -EINVAL;
- opts->gid = gid;
- break;
- case Opt_mode:
- if (match_octal(&args[0], &option))
- return -EINVAL;
- opts->mode = option & S_IALLUGO;
- break;
- /*
- * We might like to report bad mount options here;
- * but traditionally tracefs has ignored all mount options
- */
- }
-
- opts->opts |= BIT(token);
+ int opt;
+
+ opt = fs_parse(fc, tracefs_param_specs, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_uid:
+ uid = make_kuid(current_user_ns(), result.uint_32);
+ if (!uid_valid(uid))
+ return invalf(fc, "Unknown uid");
+ opts->uid = uid;
+ break;
+ case Opt_gid:
+ gid = make_kgid(current_user_ns(), result.uint_32);
+ if (!gid_valid(gid))
+ return invalf(fc, "Unknown gid");
+ opts->gid = gid;
+ break;
+ case Opt_mode:
+ opts->mode = result.uint_32 & S_IALLUGO;
+ break;
+ /*
+ * We might like to report bad mount options here;
+ * but traditionally tracefs has ignored all mount options
+ */
}
+ opts->opts |= BIT(opt);
+
return 0;
}
@@ -312,7 +345,8 @@ static int tracefs_apply_options(struct super_block *sb, bool remount)
{
struct tracefs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = d_inode(sb->s_root);
- struct tracefs_mount_opts *opts = &fsi->mount_opts;
+ struct tracefs_inode *ti;
+ bool update_uid, update_gid;
umode_t tmp_mode;
/*
@@ -320,50 +354,65 @@ static int tracefs_apply_options(struct super_block *sb, bool remount)
* options.
*/
- if (!remount || opts->opts & BIT(Opt_mode)) {
+ if (!remount || fsi->opts & BIT(Opt_mode)) {
tmp_mode = READ_ONCE(inode->i_mode) & ~S_IALLUGO;
- tmp_mode |= opts->mode;
+ tmp_mode |= fsi->mode;
WRITE_ONCE(inode->i_mode, tmp_mode);
}
- if (!remount || opts->opts & BIT(Opt_uid))
- inode->i_uid = opts->uid;
+ if (!remount || fsi->opts & BIT(Opt_uid))
+ inode->i_uid = fsi->uid;
+
+ if (!remount || fsi->opts & BIT(Opt_gid))
+ inode->i_gid = fsi->gid;
+
+ if (remount && (fsi->opts & BIT(Opt_uid) || fsi->opts & BIT(Opt_gid))) {
- if (!remount || opts->opts & BIT(Opt_gid))
- inode->i_gid = opts->gid;
+ update_uid = fsi->opts & BIT(Opt_uid);
+ update_gid = fsi->opts & BIT(Opt_gid);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ti, &tracefs_inodes, list) {
+ if (update_uid)
+ ti->flags &= ~TRACEFS_UID_PERM_SET;
+
+ if (update_gid)
+ ti->flags &= ~TRACEFS_GID_PERM_SET;
+
+ if (ti->flags & TRACEFS_EVENT_INODE)
+ eventfs_remount(ti, update_uid, update_gid);
+ }
+ rcu_read_unlock();
+ }
return 0;
}
-static int tracefs_remount(struct super_block *sb, int *flags, char *data)
+static int tracefs_reconfigure(struct fs_context *fc)
{
- int err;
- struct tracefs_fs_info *fsi = sb->s_fs_info;
+ struct super_block *sb = fc->root->d_sb;
+ struct tracefs_fs_info *sb_opts = sb->s_fs_info;
+ struct tracefs_fs_info *new_opts = fc->s_fs_info;
sync_filesystem(sb);
- err = tracefs_parse_options(data, &fsi->mount_opts);
- if (err)
- goto fail;
-
- tracefs_apply_options(sb, true);
+ /* structure copy of new mount options to sb */
+ *sb_opts = *new_opts;
-fail:
- return err;
+ return tracefs_apply_options(sb, true);
}
static int tracefs_show_options(struct seq_file *m, struct dentry *root)
{
struct tracefs_fs_info *fsi = root->d_sb->s_fs_info;
- struct tracefs_mount_opts *opts = &fsi->mount_opts;
- if (!uid_eq(opts->uid, GLOBAL_ROOT_UID))
+ if (!uid_eq(fsi->uid, GLOBAL_ROOT_UID))
seq_printf(m, ",uid=%u",
- from_kuid_munged(&init_user_ns, opts->uid));
- if (!gid_eq(opts->gid, GLOBAL_ROOT_GID))
+ from_kuid_munged(&init_user_ns, fsi->uid));
+ if (!gid_eq(fsi->gid, GLOBAL_ROOT_GID))
seq_printf(m, ",gid=%u",
- from_kgid_munged(&init_user_ns, opts->gid));
- if (opts->mode != TRACEFS_DEFAULT_MODE)
- seq_printf(m, ",mode=%o", opts->mode);
+ from_kgid_munged(&init_user_ns, fsi->gid));
+ if (fsi->mode != TRACEFS_DEFAULT_MODE)
+ seq_printf(m, ",mode=%o", fsi->mode);
return 0;
}
@@ -373,7 +422,6 @@ static const struct super_operations tracefs_super_operations = {
.free_inode = tracefs_free_inode,
.drop_inode = generic_delete_inode,
.statfs = simple_statfs,
- .remount_fs = tracefs_remount,
.show_options = tracefs_show_options,
};
@@ -398,31 +446,34 @@ static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
return !(ei && ei->is_freed);
}
+static void tracefs_d_iput(struct dentry *dentry, struct inode *inode)
+{
+ struct tracefs_inode *ti = get_tracefs(inode);
+
+ /*
+	 * This inode is being freed and can no longer be used for
+	 * eventfs. Clear the flag so that the remount flag updates do
+	 * not call into eventfs for it. The eventfs_inode is freed only
+	 * after an RCU grace period, so its contents remain safe even
+	 * if an iteration is in progress right now.
+ */
+ ti->flags &= ~TRACEFS_EVENT_INODE;
+}
+
static const struct dentry_operations tracefs_dentry_operations = {
+ .d_iput = tracefs_d_iput,
.d_revalidate = tracefs_d_revalidate,
.d_release = tracefs_d_release,
};
-static int trace_fill_super(struct super_block *sb, void *data, int silent)
+static int tracefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr trace_files[] = {{""}};
- struct tracefs_fs_info *fsi;
int err;
- fsi = kzalloc(sizeof(struct tracefs_fs_info), GFP_KERNEL);
- sb->s_fs_info = fsi;
- if (!fsi) {
- err = -ENOMEM;
- goto fail;
- }
-
- err = tracefs_parse_options(data, &fsi->mount_opts);
+ err = simple_fill_super(sb, TRACEFS_MAGIC, trace_files);
if (err)
- goto fail;
-
- err = simple_fill_super(sb, TRACEFS_MAGIC, trace_files);
- if (err)
- goto fail;
+ return err;
sb->s_op = &tracefs_super_operations;
sb->s_d_op = &tracefs_dentry_operations;
@@ -430,24 +481,45 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent)
tracefs_apply_options(sb, false);
return 0;
+}
-fail:
- kfree(fsi);
- sb->s_fs_info = NULL;
- return err;
+static int tracefs_get_tree(struct fs_context *fc)
+{
+ return get_tree_single(fc, tracefs_fill_super);
+}
+
+static void tracefs_free_fc(struct fs_context *fc)
+{
+ kfree(fc->s_fs_info);
}
-static struct dentry *trace_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name,
- void *data)
+static const struct fs_context_operations tracefs_context_ops = {
+ .free = tracefs_free_fc,
+ .parse_param = tracefs_parse_param,
+ .get_tree = tracefs_get_tree,
+ .reconfigure = tracefs_reconfigure,
+};
+
+static int tracefs_init_fs_context(struct fs_context *fc)
{
- return mount_single(fs_type, flags, data, trace_fill_super);
+ struct tracefs_fs_info *fsi;
+
+ fsi = kzalloc(sizeof(struct tracefs_fs_info), GFP_KERNEL);
+ if (!fsi)
+ return -ENOMEM;
+
+ fsi->mode = TRACEFS_DEFAULT_MODE;
+
+ fc->s_fs_info = fsi;
+ fc->ops = &tracefs_context_ops;
+ return 0;
}
static struct file_system_type trace_fs_type = {
.owner = THIS_MODULE,
.name = "tracefs",
- .mount = trace_mount,
+ .init_fs_context = tracefs_init_fs_context,
+ .parameters = tracefs_param_specs,
.kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("tracefs");
diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h
index 15c26f9aaad4..f704d8348357 100644
--- a/fs/tracefs/internal.h
+++ b/fs/tracefs/internal.h
@@ -4,15 +4,18 @@
enum {
TRACEFS_EVENT_INODE = BIT(1),
- TRACEFS_EVENT_TOP_INODE = BIT(2),
- TRACEFS_GID_PERM_SET = BIT(3),
- TRACEFS_UID_PERM_SET = BIT(4),
- TRACEFS_INSTANCE_INODE = BIT(5),
+ TRACEFS_GID_PERM_SET = BIT(2),
+ TRACEFS_UID_PERM_SET = BIT(3),
+ TRACEFS_INSTANCE_INODE = BIT(4),
};
struct tracefs_inode {
- struct inode vfs_inode;
+ union {
+ struct inode vfs_inode;
+ struct rcu_head rcu;
+ };
/* The below gets initialized with memset_after(ti, 0, vfs_inode) */
+ struct list_head list;
unsigned long flags;
void *private;
};
@@ -73,6 +76,7 @@ struct dentry *tracefs_end_creating(struct dentry *dentry);
struct dentry *tracefs_failed_creating(struct dentry *dentry);
struct inode *tracefs_get_inode(struct super_block *sb);
+void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid);
void eventfs_d_release(struct dentry *dentry);
#endif /* _TRACEFS_INTERNAL_H */
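
A note on the union added above: overlaying an rcu_head on the (now dead)
vfs_inode lets the inode be handed to RCU for deferred freeing without growing
the structure. A minimal sketch of the free path, assuming a
tracefs_inode_cachep slab cache (the cache name is an assumption, not shown in
this diff):

	static void tracefs_free_inode_rcu(struct rcu_head *rcu)
	{
		struct tracefs_inode *ti;

		/* The rcu_head aliases vfs_inode, so recover the container. */
		ti = container_of(rcu, struct tracefs_inode, rcu);
		kmem_cache_free(tracefs_inode_cachep, ti);
	}
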
diff --git a/fs/unicode/Makefile b/fs/unicode/Makefile
index 0e51c0025a16..e309afe2b2bb 100644
--- a/fs/unicode/Makefile
+++ b/fs/unicode/Makefile
@@ -18,13 +18,13 @@ ifdef REGENERATE_UTF8DATA
quiet_cmd_utf8data = GEN $@
cmd_utf8data = $< \
- -a $(srctree)/$(src)/DerivedAge.txt \
- -c $(srctree)/$(src)/DerivedCombiningClass.txt \
- -p $(srctree)/$(src)/DerivedCoreProperties.txt \
- -d $(srctree)/$(src)/UnicodeData.txt \
- -f $(srctree)/$(src)/CaseFolding.txt \
- -n $(srctree)/$(src)/NormalizationCorrections.txt \
- -t $(srctree)/$(src)/NormalizationTest.txt \
+ -a $(src)/DerivedAge.txt \
+ -c $(src)/DerivedCombiningClass.txt \
+ -p $(src)/DerivedCoreProperties.txt \
+ -d $(src)/UnicodeData.txt \
+ -f $(src)/CaseFolding.txt \
+ -n $(src)/NormalizationCorrections.txt \
+ -t $(src)/NormalizationTest.txt \
-o $@
$(obj)/utf8data.c: $(obj)/mkutf8data $(filter %.txt, $(cmd_utf8data)) FORCE
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 60dcfafdc11a..eee7320ab0b0 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -31,6 +31,7 @@
#include <linux/hugetlb.h>
#include <linux/swapops.h>
#include <linux/miscdevice.h>
+#include <linux/uio.h>
static int sysctl_unprivileged_userfaultfd __read_mostly;
@@ -282,7 +283,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
/*
	 * Verify the pagetables are still not ok after having registered into
* the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
- * userfault that has already been resolved, if userfaultfd_read and
+ * userfault that has already been resolved, if userfaultfd_read_iter and
* UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
* threads.
*/
@@ -657,7 +658,10 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
struct userfaultfd_fork_ctx *fctx;
octx = vma->vm_userfaultfd_ctx.ctx;
- if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
+ if (!octx)
+ return 0;
+
+ if (!(octx->features & UFFD_FEATURE_EVENT_FORK)) {
vma_start_write(vma);
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
@@ -895,6 +899,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
prev = vma;
continue;
}
+ /* Reset ptes for the whole vma range if wr-protected */
+ if (userfaultfd_wp(vma))
+ uffd_wp_range(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start, false);
new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
vma->vm_end, new_flags,
@@ -1177,34 +1185,34 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
return ret;
}
-static ssize_t userfaultfd_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t userfaultfd_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct userfaultfd_ctx *ctx = file->private_data;
ssize_t _ret, ret = 0;
struct uffd_msg msg;
- int no_wait = file->f_flags & O_NONBLOCK;
struct inode *inode = file_inode(file);
+ bool no_wait;
if (!userfaultfd_is_initialized(ctx))
return -EINVAL;
+ no_wait = file->f_flags & O_NONBLOCK || iocb->ki_flags & IOCB_NOWAIT;
for (;;) {
- if (count < sizeof(msg))
+ if (iov_iter_count(to) < sizeof(msg))
return ret ? ret : -EINVAL;
_ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode);
if (_ret < 0)
return ret ? ret : _ret;
- if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
+ _ret = !copy_to_iter_full(&msg, sizeof(msg), to);
+ if (_ret)
return ret ? ret : -EFAULT;
ret += sizeof(msg);
- buf += sizeof(msg);
- count -= sizeof(msg);
/*
* Allow to read more than one fault at time but only
* block if waiting for the very first one.
*/
- no_wait = O_NONBLOCK;
+ no_wait = true;
}
}
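
Switching from ->read to ->read_iter (together with FMODE_NOWAIT, set further
down) means userspace can request a one-off non-blocking read even on a
blocking fd. A hedged userspace sketch using preadv2(2) with RWF_NOWAIT, which
the kernel maps to IOCB_NOWAIT; uffd here is assumed to be an initialized
userfaultfd descriptor:

	#include <sys/uio.h>

	struct uffd_msg msg;
	struct iovec iov = { .iov_base = &msg, .iov_len = sizeof(msg) };
	/* Fails with EAGAIN instead of blocking when no fault is pending. */
	ssize_t n = preadv2(uffd, &iov, 1, -1, RWF_NOWAIT);
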
@@ -2172,7 +2180,7 @@ static const struct file_operations userfaultfd_fops = {
#endif
.release = userfaultfd_release,
.poll = userfaultfd_poll,
- .read = userfaultfd_read,
+ .read_iter = userfaultfd_read_iter,
.unlocked_ioctl = userfaultfd_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
@@ -2192,6 +2200,7 @@ static void init_once_userfaultfd_ctx(void *mem)
static int new_userfaultfd(int flags)
{
struct userfaultfd_ctx *ctx;
+ struct file *file;
int fd;
BUG_ON(!current->mm);
@@ -2215,16 +2224,26 @@ static int new_userfaultfd(int flags)
init_rwsem(&ctx->map_changing_lock);
atomic_set(&ctx->mmap_changing, 0);
ctx->mm = current->mm;
-	/* prevent the mm struct from being freed */
- mmgrab(ctx->mm);
+
+ fd = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
+ if (fd < 0)
+ goto err_out;
/* Create a new inode so that the LSM can block the creation. */
- fd = anon_inode_create_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
+ file = anon_inode_create_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
O_RDONLY | (flags & UFFD_SHARED_FCNTL_FLAGS), NULL);
- if (fd < 0) {
- mmdrop(ctx->mm);
- kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ fd = PTR_ERR(file);
+ goto err_out;
}
+	/* prevent the mm struct from being freed */
+ mmgrab(ctx->mm);
+ file->f_mode |= FMODE_NOWAIT;
+ fd_install(fd, file);
+ return fd;
+err_out:
+ kmem_cache_free(userfaultfd_ctx_cachep, ctx);
return fd;
}
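
For completeness, the fd produced here reaches userspace through the
userfaultfd(2) syscall; a hedged usage sketch:

	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* O_CLOEXEC and O_NONBLOCK are the UFFD_SHARED_FCNTL_FLAGS
	 * honoured by new_userfaultfd() above. */
	int uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
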
diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c
index 2307f8037efc..118dedef8ebe 100644
--- a/fs/vboxsf/file.c
+++ b/fs/vboxsf/file.c
@@ -218,6 +218,7 @@ const struct file_operations vboxsf_reg_fops = {
.release = vboxsf_file_release,
.fsync = noop_fsync,
.splice_read = filemap_splice_read,
+ .setlease = simple_nosetlease,
};
const struct inode_operations vboxsf_reg_iops = {
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
index cabe8ac4fefc..ffb1d565da39 100644
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -151,11 +151,11 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
if (!sbi->nls) {
vbg_err("vboxsf: Count not load '%s' nls\n", nls_name);
err = -EINVAL;
- goto fail_free;
+ goto fail_destroy_idr;
}
}
- sbi->bdi_id = ida_simple_get(&vboxsf_bdi_ida, 0, 0, GFP_KERNEL);
+ sbi->bdi_id = ida_alloc(&vboxsf_bdi_ida, GFP_KERNEL);
if (sbi->bdi_id < 0) {
err = sbi->bdi_id;
goto fail_free;
@@ -221,9 +221,10 @@ fail_unmap:
vboxsf_unmap_folder(sbi->root);
fail_free:
if (sbi->bdi_id >= 0)
- ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
+ ida_free(&vboxsf_bdi_ida, sbi->bdi_id);
if (sbi->nls)
unload_nls(sbi->nls);
+fail_destroy_idr:
idr_destroy(&sbi->ino_idr);
kfree(sbi);
return err;
@@ -268,7 +269,7 @@ static void vboxsf_put_super(struct super_block *sb)
vboxsf_unmap_folder(sbi->root);
if (sbi->bdi_id >= 0)
- ida_simple_remove(&vboxsf_bdi_ida, sbi->bdi_id);
+ ida_free(&vboxsf_bdi_ida, sbi->bdi_id);
if (sbi->nls)
unload_nls(sbi->nls);
diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c
index 72ac9320e6a3..9515bbf0b54c 100644
--- a/fs/vboxsf/utils.c
+++ b/fs/vboxsf/utils.c
@@ -440,7 +440,6 @@ int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
{
const char *in;
char *out;
- size_t out_len;
size_t out_bound_len;
size_t in_bound_len;
@@ -448,7 +447,6 @@ int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
in_bound_len = utf8_len;
out = name;
- out_len = 0;
/* Reserve space for terminating 0 */
out_bound_len = name_bound_len - 1;
@@ -469,7 +467,6 @@ int vboxsf_nlscpy(struct vboxsf_sbi *sbi, char *name, size_t name_bound_len,
out += nb;
out_bound_len -= nb;
- out_len += nb;
}
*out = 0;
diff --git a/fs/verity/init.c b/fs/verity/init.c
index cb2c9aac61ed..f440f0e61e3e 100644
--- a/fs/verity/init.c
+++ b/fs/verity/init.c
@@ -10,8 +10,6 @@
#include <linux/ratelimit.h>
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *fsverity_sysctl_header;
-
static struct ctl_table fsverity_sysctl_table[] = {
#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
{
@@ -28,10 +26,7 @@ static struct ctl_table fsverity_sysctl_table[] = {
static void __init fsverity_init_sysctl(void)
{
- fsverity_sysctl_header = register_sysctl("fs/verity",
- fsverity_sysctl_table);
- if (!fsverity_sysctl_header)
- panic("fsverity sysctl registration failed");
+ register_sysctl_init("fs/verity", fsverity_sysctl_table);
}
#else /* CONFIG_SYSCTL */
static inline void fsverity_init_sysctl(void)
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 76674ad5833e..c5a35e32adf0 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -4,8 +4,8 @@
# All Rights Reserved.
#
-ccflags-y += -I $(srctree)/$(src) # needed for trace events
-ccflags-y += -I $(srctree)/$(src)/libxfs
+ccflags-y += -I $(src) # needed for trace events
+ccflags-y += -I $(src)/libxfs
obj-$(CONFIG_XFS_FS) += xfs.o
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index d991eec05436..73a4b895de67 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -530,7 +530,8 @@ xfs_validate_sb_common(
}
if (!xfs_validate_stripe_geometry(mp, XFS_FSB_TO_B(mp, sbp->sb_unit),
- XFS_FSB_TO_B(mp, sbp->sb_width), 0, false))
+ XFS_FSB_TO_B(mp, sbp->sb_width), 0,
+ xfs_buf_daddr(bp) == XFS_SB_DADDR, false))
return -EFSCORRUPTED;
/*
@@ -1323,8 +1324,10 @@ xfs_sb_get_secondary(
}
/*
- * sunit, swidth, sectorsize(optional with 0) should be all in bytes,
- * so users won't be confused by values in error messages.
+ * sunit, swidth and sectorsize (optional; pass 0 to skip the check) should
+ * all be in bytes, so users won't be confused by values in error messages.
+ * This function returns false if the stripe geometry is invalid and the
+ * caller cannot repair the stripe configuration later in the mount process.
*/
bool
xfs_validate_stripe_geometry(
@@ -1332,20 +1335,21 @@ xfs_validate_stripe_geometry(
__s64 sunit,
__s64 swidth,
int sectorsize,
+ bool may_repair,
bool silent)
{
if (swidth > INT_MAX) {
if (!silent)
xfs_notice(mp,
"stripe width (%lld) is too large", swidth);
- return false;
+ goto check_override;
}
if (sunit > swidth) {
if (!silent)
xfs_notice(mp,
"stripe unit (%lld) is larger than the stripe width (%lld)", sunit, swidth);
- return false;
+ goto check_override;
}
if (sectorsize && (int)sunit % sectorsize) {
@@ -1353,21 +1357,21 @@ xfs_validate_stripe_geometry(
xfs_notice(mp,
"stripe unit (%lld) must be a multiple of the sector size (%d)",
sunit, sectorsize);
- return false;
+ goto check_override;
}
if (sunit && !swidth) {
if (!silent)
xfs_notice(mp,
"invalid stripe unit (%lld) and stripe width of 0", sunit);
- return false;
+ goto check_override;
}
if (!sunit && swidth) {
if (!silent)
xfs_notice(mp,
"invalid stripe width (%lld) and stripe unit of 0", swidth);
- return false;
+ goto check_override;
}
if (sunit && (int)swidth % (int)sunit) {
@@ -1375,9 +1379,27 @@ xfs_validate_stripe_geometry(
xfs_notice(mp,
"stripe width (%lld) must be a multiple of the stripe unit (%lld)",
swidth, sunit);
- return false;
+ goto check_override;
}
return true;
+
+check_override:
+ if (!may_repair)
+ return false;
+ /*
+ * During mount, mp->m_dalign will not be set unless the sunit mount
+ * option was set. If it was set, ignore the bad stripe alignment values
+ * and allow the validation and overwrite later in the mount process to
+ * attempt to overwrite the bad stripe alignment values with the values
+ * supplied by mount options.
+ */
+ if (!mp->m_dalign)
+ return false;
+ if (!silent)
+ xfs_notice(mp,
+"Will try to correct with specified mount options sunit (%d) and swidth (%d)",
+ BBTOB(mp->m_dalign), BBTOB(mp->m_swidth));
+ return true;
}
/*
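
A worked note on the new notice above: m_dalign and m_swidth are kept in
512-byte basic blocks, and BBTOB() scales them to bytes for the message. For
example, under the hypothetical mount options sunit=128,swidth=512 (also in
512-byte blocks):

	/*
	 * BBTOB(x) is x << 9 (basic blocks to bytes), so:
	 *
	 *	BBTOB(128) = 65536  bytes (64 KiB stripe unit)
	 *	BBTOB(512) = 262144 bytes (256 KiB stripe width)
	 */
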
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 2e8e8d63d4eb..37b1ed1bc209 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -35,8 +35,9 @@ extern int xfs_sb_get_secondary(struct xfs_mount *mp,
struct xfs_trans *tp, xfs_agnumber_t agno,
struct xfs_buf **bpp);
-extern bool xfs_validate_stripe_geometry(struct xfs_mount *mp,
- __s64 sunit, __s64 swidth, int sectorsize, bool silent);
+bool xfs_validate_stripe_geometry(struct xfs_mount *mp,
+ __s64 sunit, __s64 swidth, int sectorsize, bool may_repair,
+ bool silent);
uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents);
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index abff79a77c72..47a20cf5205f 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -1044,9 +1044,7 @@ xchk_irele(
struct xfs_scrub *sc,
struct xfs_inode *ip)
{
- if (current->journal_info != NULL) {
- ASSERT(current->journal_info == sc->tp);
-
+ if (sc->tp) {
/*
* If we are in a transaction, we /cannot/ drop the inode
* ourselves, because the VFS will trigger writeback, which
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 1698507d1ac7..3f428620ebf2 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -503,13 +503,6 @@ xfs_vm_writepages(
{
struct xfs_writepage_ctx wpc = { };
- /*
- * Writing back data in a transaction context can result in recursive
- * transactions. This is bad, so issue a warning and get out of here.
- */
- if (WARN_ON_ONCE(current->journal_info))
- return 0;
-
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 1a18c381127e..f0fa02264eda 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -2030,7 +2030,7 @@ xfs_free_buftarg(
fs_put_dax(btp->bt_daxdev, btp->bt_mount);
/* the main block device is closed by kill_block_super */
if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
- fput(btp->bt_bdev_file);
+ bdev_fput(btp->bt_bdev_file);
kfree(btp);
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 632653e00906..2ce302b4885f 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1230,8 +1230,7 @@ xfs_file_open(
{
if (xfs_is_shutdown(XFS_M(inode->i_sb)))
return -EIO;
- file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
- FMODE_DIO_PARALLEL_WRITE | FMODE_CAN_ODIRECT;
+ file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT;
return generic_file_open(inode, file);
}
@@ -1244,7 +1243,9 @@ xfs_dir_open(
unsigned int mode;
int error;
- error = xfs_file_open(inode, file);
+ if (xfs_is_shutdown(ip->i_mount))
+ return -EIO;
+ error = generic_file_open(inode, file);
if (error)
return error;
@@ -1490,7 +1491,6 @@ const struct file_operations xfs_file_operations = {
.compat_ioctl = xfs_file_compat_ioctl,
#endif
.mmap = xfs_file_mmap,
- .mmap_supported_flags = MAP_SYNC,
.open = xfs_file_open,
.release = xfs_file_release,
.fsync = xfs_file_fsync,
@@ -1498,6 +1498,8 @@ const struct file_operations xfs_file_operations = {
.fallocate = xfs_file_fallocate,
.fadvise = xfs_file_fadvise,
.remap_file_range = xfs_file_remap_range,
+ .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
+ FOP_BUFFER_WASYNC | FOP_DIO_PARALLEL_WRITE,
};
const struct file_operations xfs_dir_file_operations = {
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index e64265bc0b33..74f1812b03cb 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -2039,8 +2039,10 @@ xfs_inodegc_want_queue_work(
* - Memory shrinkers queued the inactivation worker and it hasn't finished.
* - The queue depth exceeds the maximum allowable percpu backlog.
*
- * Note: If the current thread is running a transaction, we don't ever want to
- * wait for other transactions because that could introduce a deadlock.
+ * Note: If we are in a NOFS context here (e.g. the current thread is
+ * running a transaction) we don't want to block, because inodegc may need
+ * filesystem resources we currently hold in order to make progress, and
+ * that could result in a deadlock. Hence we skip out of here if we are in
+ * a scoped NOFS context.
*/
static inline bool
xfs_inodegc_want_flush_work(
@@ -2048,7 +2050,7 @@ xfs_inodegc_want_flush_work(
unsigned int items,
unsigned int shrinker_hits)
{
- if (current->journal_info)
+ if (current->flags & PF_MEMALLOC_NOFS)
return false;
if (shrinker_hits > 0)
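
The scoped NOFS check above pairs with the xfs_trans.h change later in this
patch: transactions now rely solely on the scoped allocation API rather than
current->journal_info. A minimal sketch of that scoping, assuming only
linux/sched/mm.h; do_transaction_work() is a hypothetical placeholder:

	unsigned int pflags = memalloc_nofs_save();

	/*
	 * Allocations here implicitly get GFP_NOFS semantics, and
	 * (current->flags & PF_MEMALLOC_NOFS) is true, which is what
	 * xfs_inodegc_want_flush_work() now tests.
	 */
	do_transaction_work();

	memalloc_nofs_restore(pflags);
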
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ea48774f6b76..d55b42b2480d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1301,8 +1301,19 @@ xfs_link(
*/
if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
tdp->i_projid != sip->i_projid)) {
- error = -EXDEV;
- goto error_return;
+ /*
+ * Project quota setup skips special files which can
+ * leave inodes in a PROJINHERIT directory without a
+ * project ID set. We need to allow links to be made
+ * to these "project-less" inodes because userspace
+ * expects them to succeed after project ID setup,
+ * but everything else should be rejected.
+ */
+ if (!special_file(VFS_I(sip)->i_mode) ||
+ sip->i_projid != 0) {
+ error = -EXDEV;
+ goto error_return;
+ }
}
if (!resblks) {
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index c21f10ab0f5d..bce020374c5e 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -485,7 +485,7 @@ xfs_open_devices(
mp->m_logdev_targp = mp->m_ddev_targp;
/* Handle won't be used, drop it */
if (logdev_file)
- fput(logdev_file);
+ bdev_fput(logdev_file);
}
return 0;
@@ -497,10 +497,10 @@ xfs_open_devices(
xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
if (rtdev_file)
- fput(rtdev_file);
+ bdev_fput(rtdev_file);
out_close_logdev:
if (logdev_file)
- fput(logdev_file);
+ bdev_fput(logdev_file);
return error;
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 3f7e3a09a49f..1636663707dc 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -268,19 +268,14 @@ static inline void
xfs_trans_set_context(
struct xfs_trans *tp)
{
- ASSERT(current->journal_info == NULL);
tp->t_pflags = memalloc_nofs_save();
- current->journal_info = tp;
}
static inline void
xfs_trans_clear_context(
struct xfs_trans *tp)
{
- if (current->journal_info == tp) {
- memalloc_nofs_restore(tp->t_pflags);
- current->journal_info = NULL;
- }
+ memalloc_nofs_restore(tp->t_pflags);
}
static inline void
@@ -288,10 +283,8 @@ xfs_trans_switch_context(
struct xfs_trans *old_tp,
struct xfs_trans *new_tp)
{
- ASSERT(current->journal_info == old_tp);
new_tp->t_pflags = old_tp->t_pflags;
old_tp->t_pflags = 0;
- current->journal_info = new_tp;
}
#endif /* __XFS_TRANS_H__ */
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index c6a124e8d565..964fa7f24003 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -1048,7 +1048,7 @@ static int zonefs_init_zgroup(struct super_block *sb,
zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
zonefs_zgroup_name(ztype),
zgroup->g_nr_zones,
- zgroup->g_nr_zones > 1 ? "s" : "");
+ str_plural(zgroup->g_nr_zones));
return 0;
}
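
str_plural() comes from linux/string_choices.h and reduces to the obvious
helper, so the call above is equivalent to the removed ternary except that it
also pluralizes zero ("0 files" rather than the old "0 file"):

	static inline const char *str_plural(size_t num)
	{
		return num == 1 ? "" : "s";
	}
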