| author | Kent Overstreet <kent.overstreet@gmail.com> | 2019-06-30 16:35:37 -0400 |
|---|---|---|
| committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-08-07 17:52:47 -0400 |
| commit | 75d24e1c52291aec10ddc68b0771be7da738642c (patch) | |
| tree | c90c8f9a6b169b5bd5e6b3894f043d7e066aabce | |
| parent | 853bfa639dab6096c20942a020e5b809646a0388 (diff) |
bcachefs: Fixes for 4.19
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
43 files changed, 813 insertions, 460 deletions
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 5cb06ac58960..4c200da94130 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -212,10 +212,9 @@ bch2_acl_to_xattr(struct btree_trans *trans,
     return xattr;
 }
 
-struct posix_acl *bch2_get_acl(struct user_namespace *mnt_userns,
-                   struct dentry *dentry, int type)
+struct posix_acl *bch2_get_acl(struct inode *vinode, int type)
 {
-    struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+    struct bch_inode_info *inode = to_bch_ei(vinode);
     struct bch_fs *c = inode->v.i_sb->s_fs_info;
     struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
     struct btree_trans trans;
@@ -290,11 +289,9 @@ int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
     return ret == -ENOENT ? 0 : ret;
 }
 
-int bch2_set_acl(struct user_namespace *mnt_userns,
-         struct dentry *dentry,
-         struct posix_acl *_acl, int type)
+int bch2_set_acl(struct inode *vinode, struct posix_acl *_acl, int type)
 {
-    struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+    struct bch_inode_info *inode = to_bch_ei(vinode);
     struct bch_fs *c = inode->v.i_sb->s_fs_info;
     struct btree_trans trans;
     struct btree_iter inode_iter = { NULL };
@@ -317,7 +314,7 @@ retry:
     mode = inode_u.bi_mode;
 
     if (type == ACL_TYPE_ACCESS) {
-        ret = posix_acl_update_mode(mnt_userns, &inode->v, &mode, &acl);
+        ret = posix_acl_update_mode(&inode->v, &mode, &acl);
         if (ret)
             goto btree_err;
     }
diff --git a/fs/bcachefs/acl.h b/fs/bcachefs/acl.h
index ac206f6584e9..14cabbc91808 100644
--- a/fs/bcachefs/acl.h
+++ b/fs/bcachefs/acl.h
@@ -26,12 +26,12 @@ typedef struct {
     __le32      a_version;
 } bch_acl_header;
 
-struct posix_acl *bch2_get_acl(struct user_namespace *, struct dentry *, int);
+struct posix_acl *bch2_get_acl(struct inode *, int);
 
 int bch2_set_acl_trans(struct btree_trans *, subvol_inum,
                struct bch_inode_unpacked *,
                struct posix_acl *, int);
-int bch2_set_acl(struct user_namespace *, struct dentry *, struct posix_acl *, int);
+int bch2_set_acl(struct inode *, struct posix_acl *, int);
 int bch2_acl_chmod(struct btree_trans *, subvol_inum,
            struct bch_inode_unpacked *,
            umode_t, struct posix_acl **);
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index aef796b5a48a..f2826e881b3c 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -1695,7 +1695,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
         blkdev_issue_discard(ca->disk_sb.bdev,
                      k.k->p.offset * ca->mi.bucket_size,
                      ca->mi.bucket_size,
-                     GFP_KERNEL);
+                     GFP_KERNEL, 0);
 
         *discard_pos_done = iter.pos;
 
         ret = bch2_trans_relock_notrace(trans);
@@ -2073,7 +2073,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
     lockdep_assert_held(&c->state_lock);
 
     for_each_online_member(ca, c, i) {
-        struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
+        struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
 
         ra_pages += bdi->ra_pages;
     }
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 1e7c810d3569..b2ce62ba9097 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -911,7 +911,7 @@ struct bch_fs {
     ZSTD_parameters zstd_params;
 
     struct crypto_shash *sha256;
-    struct crypto_sync_skcipher *chacha20;
+    struct crypto_skcipher *chacha20;
     struct crypto_shash *poly1305;
 
     atomic64_t      key_version;
@@ -1048,11 +1048,13 @@ static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref)
 {
 #ifdef BCH_WRITE_REF_DEBUG
     long v = atomic_long_dec_return(&c->writes[ref]);
+    unsigned i;
 
     BUG_ON(v < 0);
     if (v)
         return;
-    for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
+
+    for (i = 0; i < BCH_WRITE_REF_NR; i++)
         if (atomic_long_read(&c->writes[i]))
             return;
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index c53597a29e2e..0aa36e49f59f 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -100,7 +100,8 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
     if (!b->data)
         return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
-    b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+    b->aux_data = __vmalloc(btree_aux_data_bytes(b), gfp,
+                PAGE_KERNEL_EXEC);
 #else
     b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
                PROT_READ|PROT_WRITE|PROT_EXEC,
@@ -127,9 +128,6 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
     bkey_btree_ptr_init(&b->key);
     bch2_btree_lock_init(&b->c);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-    lockdep_set_no_check_recursion(&b->c.lock.dep_map);
-#endif
     INIT_LIST_HEAD(&b->list);
     INIT_LIST_HEAD(&b->write_blocked);
     b->byte_order = ilog2(btree_bytes(c));
@@ -428,18 +426,6 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
     return btree_cache_can_free(bc);
 }
 
-static void bch2_btree_cache_shrinker_to_text(struct seq_buf *s, struct shrinker *shrink)
-{
-    struct bch_fs *c = container_of(shrink, struct bch_fs,
-                    btree_cache.shrink);
-    char *cbuf;
-    size_t buflen = seq_buf_get_buf(s, &cbuf);
-    struct printbuf out = PRINTBUF_EXTERN(cbuf, buflen);
-
-    bch2_btree_cache_to_text(&out, &c->btree_cache);
-    seq_buf_commit(s, out.pos);
-}
-
 void bch2_fs_btree_cache_exit(struct bch_fs *c)
 {
     struct btree_cache *bc = &c->btree_cache;
@@ -523,9 +509,8 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
     bc->shrink.count_objects    = bch2_btree_cache_count;
     bc->shrink.scan_objects     = bch2_btree_cache_scan;
-    bc->shrink.to_text      = bch2_btree_cache_shrinker_to_text;
     bc->shrink.seeks        = 4;
-    ret = register_shrinker(&bc->shrink, "%s/btree_cache", c->name);
+    ret = register_shrinker(&bc->shrink);
 out:
     pr_verbose_init(c->opts, "ret %i", ret);
     return ret;
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index fb4226aa0255..86cf1250c8a2 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -805,7 +805,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
         if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
                 "key version number higher than recorded: %llu > %llu",
                 k->k->version.lo,
-                atomic64_read(&c->key_version)))
+                (u64) atomic64_read(&c->key_version)))
             atomic64_set(&c->key_version, k->k->version.lo);
     }
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 586e2f96f649..0ddc81aa4dd3 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1203,7 +1203,8 @@ static void btree_node_read_work(struct work_struct *work)
         bch_info(c, "retrying read");
         ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
         rb->have_ioref = bch2_dev_get_ioref(ca, READ);
-        bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
+        bio_reset(bio);
+        bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
         bio->bi_iter.bi_sector = rb->pick.ptr.offset;
         bio->bi_iter.bi_size = btree_bytes(c);
 
@@ -1494,10 +1495,8 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
     for (i = 0; i < ra->nr; i++) {
         ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
-        ra->bio[i] = bio_alloc_bioset(NULL,
-                          buf_pages(ra->buf[i], btree_bytes(c)),
-                          REQ_OP_READ|REQ_SYNC|REQ_META,
-                          GFP_NOFS,
+        ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i],
+                                  btree_bytes(c)),
                           &c->btree_bio);
     }
 
@@ -1513,6 +1512,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
         rb->have_ioref = bch2_dev_get_ioref(ca, READ);
         rb->idx = i;
         rb->pick = pick;
+        rb->bio.bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
         rb->bio.bi_iter.bi_sector = pick.ptr.offset;
         rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
         bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
@@ -1579,10 +1579,8 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 
     ca = bch_dev_bkey_exists(c, pick.ptr.dev);
 
-    bio = bio_alloc_bioset(NULL,
-                   buf_pages(b->data, btree_bytes(c)),
-                   REQ_OP_READ|REQ_SYNC|REQ_META,
-                   GFP_NOIO,
+    bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
+                           btree_bytes(c)),
                    &c->btree_bio);
     rb = container_of(bio, struct btree_read_bio, bio);
     rb->c = c;
@@ -1592,6 +1590,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
     rb->have_ioref = bch2_dev_get_ioref(ca, READ);
     rb->pick = pick;
     INIT_WORK(&rb->work, btree_node_read_work);
+    bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
     bio->bi_iter.bi_sector = pick.ptr.offset;
     bio->bi_end_io = btree_node_read_endio;
     bch2_bio_map(bio, b->data, btree_bytes(c));
@@ -2075,10 +2074,8 @@ do_write:
 
     trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
 
-    wbio = container_of(bio_alloc_bioset(NULL,
+    wbio = container_of(bio_alloc_bioset(GFP_NOIO,
                 buf_pages(data, sectors_to_write << 9),
-                REQ_OP_WRITE|REQ_META,
-                GFP_NOIO,
                 &c->btree_bio),
                 struct btree_write_bio, wbio.bio);
     wbio_init(&wbio->wbio.bio);
@@ -2088,6 +2085,7 @@ do_write:
     wbio->wbio.c = c;
     wbio->wbio.used_mempool = used_mempool;
     wbio->wbio.first_btree_write = !b->written;
+    wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
     wbio->wbio.bio.bi_end_io = btree_node_write_endio;
     wbio->wbio.bio.bi_private = b;
 
@@ -2239,6 +2237,8 @@ const char * const bch2_btree_write_types[] = {
 
 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
 {
+    unsigned i;
+
     printbuf_tabstop_push(out, 20);
     printbuf_tabstop_push(out, 10);
 
@@ -2248,7 +2248,7 @@ void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
     prt_str(out, "size");
     prt_newline(out);
 
-    for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
+    for (i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
         u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
         u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
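Backport note (editor's illustration, not part of the commit): on a 4.19-era kernel, `bio_alloc_bioset()` takes `(gfp, nr_iovecs, bioset)`, and the target device and operation flags are set on the bio afterwards — the shape this patch converts every caller to. A minimal sketch of that older calling convention; `alloc_meta_read_bio()` is a made-up helper name:

```c
#include <linux/bio.h>

/* Sketch of the pre-5.18 bio allocation pattern assumed by this backport. */
static struct bio *alloc_meta_read_bio(struct block_device *bdev,
                                       unsigned nr_pages, struct bio_set *bs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_pages, bs);

	if (!bio)
		return NULL;

	bio_set_dev(bio, bdev);                      /* replaces the bdev argument */
	bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META; /* replaces the opf argument */
	return bio;
}
```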
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index c43fb60b8c82..43c46c800669 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -114,7 +114,7 @@ static inline int bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset
         if (ret)
             return ret;
 
-        nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
+        nonce = nonce_add(nonce, round_up(bytes, CHACHA20_BLOCK_SIZE));
     }
 
     return bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 33269afe9cf2..e2fe93bcc092 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -908,7 +908,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
     do {
         struct rhash_head *pos, *next;
 
-        pos = rht_ptr_rcu(rht_bucket(tbl, bc->shrink_iter));
+        pos = *rht_bucket(tbl, bc->shrink_iter);
 
         while (!rht_is_a_nulls(pos)) {
             next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
@@ -1041,22 +1041,8 @@ void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
     INIT_LIST_HEAD(&c->freed_nonpcpu);
 }
 
-static void bch2_btree_key_cache_shrinker_to_text(struct seq_buf *s, struct shrinker *shrink)
-{
-    struct btree_key_cache *bc =
-        container_of(shrink, struct btree_key_cache, shrink);
-    char *cbuf;
-    size_t buflen = seq_buf_get_buf(s, &cbuf);
-    struct printbuf out = PRINTBUF_EXTERN(cbuf, buflen);
-
-    bch2_btree_key_cache_to_text(&out, bc);
-    seq_buf_commit(s, out.pos);
-}
-
 int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
 {
-    struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-
 #ifdef __KERNEL__
     bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
     if (!bc->pcpu_freed)
@@ -1068,11 +1054,10 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
 
     bc->table_init_done = true;
 
-    bc->shrink.seeks        = 0;
+    bc->shrink.seeks        = 1;
     bc->shrink.count_objects    = bch2_btree_key_cache_count;
     bc->shrink.scan_objects     = bch2_btree_key_cache_scan;
-    bc->shrink.to_text      = bch2_btree_key_cache_shrinker_to_text;
-    if (register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name))
+    if (register_shrinker(&bc->shrink))
         return -BCH_ERR_ENOMEM_fs_btree_cache_init;
     return 0;
 }
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index b99986653ade..bba7a3936799 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -14,7 +14,6 @@ void bch2_btree_lock_init(struct btree_bkey_cached_common *b)
 #ifdef CONFIG_LOCKDEP
 void bch2_assert_btree_nodes_not_locked(void)
 {
-    BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
 }
 #endif
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 843e138862f6..442ff8387bd9 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -12,7 +12,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <crypto/algapi.h>
-#include <crypto/chacha.h>
+#include <crypto/chacha20.h>
 #include <crypto/hash.h>
 #include <crypto/poly1305.h>
 #include <crypto/skcipher.h>
@@ -94,14 +94,14 @@ static void bch2_checksum_update(struct bch2_checksum_state *state, const void *
     }
 }
 
-static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
+static inline int do_encrypt_sg(struct crypto_skcipher *tfm,
                 struct nonce nonce,
                 struct scatterlist *sg, size_t len)
 {
-    SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+    SKCIPHER_REQUEST_ON_STACK(req, tfm);
     int ret;
 
-    skcipher_request_set_sync_tfm(req, tfm);
+    skcipher_request_set_tfm(req, tfm);
     skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
 
     ret = crypto_skcipher_encrypt(req);
@@ -111,7 +111,7 @@ static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
     return ret;
 }
 
-static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
+static inline int do_encrypt(struct crypto_skcipher *tfm,
                  struct nonce nonce,
                  void *buf, size_t len)
 {
@@ -155,8 +155,8 @@ static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
 int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
                 void *buf, size_t len)
 {
-    struct crypto_sync_skcipher *chacha20 =
-        crypto_alloc_sync_skcipher("chacha20", 0, 0);
+    struct crypto_skcipher *chacha20 =
+        crypto_alloc_skcipher("chacha20", 0, 0);
     int ret;
 
     if (!chacha20) {
@@ -164,8 +164,7 @@ int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
         return PTR_ERR(chacha20);
     }
 
-    ret = crypto_skcipher_setkey(&chacha20->base,
-                     (void *) key, sizeof(*key));
+    ret = crypto_skcipher_setkey(chacha20, (void *) key, sizeof(*key));
     if (ret) {
         pr_err("crypto_skcipher_setkey() error: %i", ret);
         goto err;
@@ -173,7 +172,7 @@ int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
 
     ret = do_encrypt(chacha20, nonce, buf, len);
 err:
-    crypto_free_sync_skcipher(chacha20);
+    crypto_free_skcipher(chacha20);
     return ret;
 }
 
@@ -270,7 +269,7 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
             kunmap_atomic(p);
         }
 #else
-        __bio_for_each_bvec(bv, bio, *iter, *iter)
+        __bio_for_each_contig_segment(bv, bio, *iter, *iter)
             bch2_checksum_update(&state,
                 page_address(bv.bv_page) + bv.bv_offset,
                 bv.bv_len);
 #endif
@@ -293,7 +292,7 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
             kunmap_atomic(p);
         }
 #else
-        __bio_for_each_bvec(bv, bio, *iter, *iter)
+        __bio_for_each_contig_segment(bv, bio, *iter, *iter)
             crypto_shash_update(desc,
                 page_address(bv.bv_page) + bv.bv_offset,
                 bv.bv_len);
@@ -556,7 +555,7 @@ static int bch2_alloc_ciphers(struct bch_fs *c)
     int ret;
 
     if (!c->chacha20)
-        c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
+        c->chacha20 = crypto_alloc_skcipher("chacha20", 0, 0);
     ret = PTR_ERR_OR_ZERO(c->chacha20);
 
     if (ret) {
@@ -641,7 +640,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
             goto err;
     }
 
-    ret = crypto_skcipher_setkey(&c->chacha20->base,
+    ret = crypto_skcipher_setkey(c->chacha20,
                      (void *) &key.key, sizeof(key.key));
     if (ret)
         goto err;
@@ -669,7 +668,7 @@ void bch2_fs_encryption_exit(struct bch_fs *c)
     if (!IS_ERR_OR_NULL(c->poly1305))
         crypto_free_shash(c->poly1305);
     if (!IS_ERR_OR_NULL(c->chacha20))
-        crypto_free_sync_skcipher(c->chacha20);
+        crypto_free_skcipher(c->chacha20);
     if (!IS_ERR_OR_NULL(c->sha256))
         crypto_free_shash(c->sha256);
 }
@@ -701,7 +700,7 @@ int bch2_fs_encryption_init(struct bch_fs *c)
     if (ret)
         goto out;
 
-    ret = crypto_skcipher_setkey(&c->chacha20->base,
+    ret = crypto_skcipher_setkey(c->chacha20,
                      (void *) &key.key, sizeof(key.key));
     if (ret)
         goto out;
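Backport note (editor's illustration, not part of the commit): `crypto_sync_skcipher` and `SYNC_SKCIPHER_REQUEST_ON_STACK()` do not exist in 4.19; a plain `crypto_skcipher` allocated without `CRYPTO_ALG_ASYNC` fills the same role, which is why the patch drops the `->base` indirection everywhere. A minimal sketch under that assumption; `chacha20_apply()` is a made-up helper name:

```c
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Encrypt/decrypt a scatterlist in place with an already-keyed tfm. */
static int chacha20_apply(struct crypto_skcipher *tfm, __le32 *nonce,
                          struct scatterlist *sg, size_t len)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);  /* 4.19 spelling of the on-stack request */
	int ret;

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_crypt(req, sg, sg, len, nonce);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
	return ret;
}
```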
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 409ad534d9f4..ca12f63f579c 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -7,7 +7,7 @@
 #include "super-io.h"
 
 #include <linux/crc64.h>
-#include <crypto/chacha.h>
+#include <crypto/chacha20.h>
 
 static inline bool bch2_checksum_mergeable(unsigned type)
 {
@@ -151,9 +151,9 @@ static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
 /* for skipping ahead and encrypting/decrypting at an offset: */
 static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
 {
-    EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1));
+    EBUG_ON(offset & (CHACHA20_BLOCK_SIZE - 1));
 
-    le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE);
+    le32_add_cpu(&nonce.d[0], offset / CHACHA20_BLOCK_SIZE);
     return nonce;
 }
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 6bec38440249..8683b144d8f7 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -45,7 +45,7 @@ static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
     struct bvec_iter iter;
     void *expected_start = NULL;
 
-    __bio_for_each_bvec(bv, bio, iter, start) {
+    __bio_for_each_segment(bv, bio, iter, start) {
         if (expected_start &&
             expected_start != page_address(bv.bv_page) + bv.bv_offset)
             return false;
@@ -197,9 +197,9 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
             goto err;
 
         workspace = mempool_alloc(&c->decompress_workspace, GFP_NOIO);
-        ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());
+        ctx = ZSTD_initDCtx(workspace, ZSTD_DCtxWorkspaceBound());
 
-        ret = zstd_decompress_dctx(ctx,
+        ret = ZSTD_decompressDCtx(ctx,
                 dst_data,   dst_len,
                 src_data.b + 4, real_src_len);
@@ -333,8 +333,8 @@ static int attempt_compress(struct bch_fs *c,
         return strm.total_out;
     }
     case BCH_COMPRESSION_TYPE_zstd: {
-        ZSTD_CCtx *ctx = zstd_init_cctx(workspace,
-            zstd_cctx_workspace_bound(&c->zstd_params.cParams));
+        ZSTD_CCtx *ctx = ZSTD_initCCtx(workspace,
+            ZSTD_CCtxWorkspaceBound(c->zstd_params.cParams));
 
         /*
          * ZSTD requires that when we decompress we pass in the exact
@@ -347,11 +347,11 @@ static int attempt_compress(struct bch_fs *c,
          * factor (7 bytes) from the dst buffer size to account for
          * that.
          */
-        size_t len = zstd_compress_cctx(ctx,
+        size_t len = ZSTD_compressCCtx(ctx,
                 dst + 4,    dst_len - 4 - 7,
                 src,        src_len,
-                &c->zstd_params);
-        if (zstd_is_error(len))
+                c->zstd_params);
+        if (ZSTD_isError(len))
             return 0;
 
         *((__le32 *) dst) = cpu_to_le32(len);
@@ -546,7 +546,7 @@ static int _bch2_fs_compress_init(struct bch_fs *c, u64 features)
 {
     size_t decompress_workspace_size = 0;
     bool decompress_workspace_needed;
-    ZSTD_parameters params = zstd_get_params(0, c->opts.encoded_extent_max);
+    ZSTD_parameters params = ZSTD_getParams(0, c->opts.encoded_extent_max, 0);
     struct {
         unsigned    feature;
         unsigned    type;
@@ -558,8 +558,8 @@ static int _bch2_fs_compress_init(struct bch_fs *c, u64 features)
         zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
         zlib_inflate_workspacesize(), },
     { BCH_FEATURE_zstd, BCH_COMPRESSION_TYPE_zstd,
-        zstd_cctx_workspace_bound(&params.cParams),
-        zstd_dctx_workspace_bound() },
+        ZSTD_CCtxWorkspaceBound(params.cParams),
+        ZSTD_DCtxWorkspaceBound() },
     }, *i;
     bool have_compressed = false;
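Backport note (editor's illustration, not part of the commit): the 4.19 in-kernel zstd wrapper keeps the upstream zstd-1.3.x names (`ZSTD_initCCtx()` etc.); the lowercase `zstd_*()` wrappers only appeared in v5.16. A minimal sketch of compressing with the old names, assuming a preallocated workspace; `compress_buf()` is a made-up helper name:

```c
#include <linux/zstd.h>

/* Compress src into dst using a caller-provided workspace; 0 on failure. */
static size_t compress_buf(void *workspace, size_t workspace_size,
                           void *dst, size_t dst_len,
                           const void *src, size_t src_len,
                           ZSTD_parameters params)
{
	ZSTD_CCtx *ctx = ZSTD_initCCtx(workspace, workspace_size);
	size_t len;

	if (!ctx)
		return 0;

	len = ZSTD_compressCCtx(ctx, dst, dst_len, src, src_len, params);
	return ZSTD_isError(len) ? 0 : len;   /* old-style error check */
}
```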
diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h
index 978ab7961f1b..4f655bb4ade5 100644
--- a/fs/bcachefs/darray.h
+++ b/fs/bcachefs/darray.h
@@ -23,7 +23,7 @@ static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more,
     if (d->nr + more > d->size) {
         size_t new_size = roundup_pow_of_two(d->nr + more);
-        void *data = krealloc_array(d->data, new_size, t_size, gfp);
+        void *data = krealloc(d->data, new_size * t_size, gfp);
 
         if (!data)
             return -ENOMEM;
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index d1563caf7fb7..33f7153664af 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -44,11 +44,11 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
     if (!bch2_dev_get_ioref(ca, READ))
         return false;
 
-    bio = bio_alloc_bioset(ca->disk_sb.bdev,
+    bio = bio_alloc_bioset(GFP_NOIO,
                buf_pages(n_sorted, btree_bytes(c)),
-               REQ_OP_READ|REQ_META,
-               GFP_NOIO,
                &c->btree_bio);
+    bio_set_dev(bio, ca->disk_sb.bdev);
+    bio->bi_opf = REQ_OP_READ|REQ_META;
     bio->bi_iter.bi_sector = pick.ptr.offset;
     bch2_bio_map(bio, n_sorted, btree_bytes(c));
 
@@ -208,11 +208,11 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
         goto out;
     }
 
-    bio = bio_alloc_bioset(ca->disk_sb.bdev,
+    bio = bio_alloc_bioset(GFP_NOIO,
                buf_pages(n_ondisk, btree_bytes(c)),
-               REQ_OP_READ|REQ_META,
-               GFP_NOIO,
                &c->btree_bio);
+    bio_set_dev(bio, ca->disk_sb.bdev);
+    bio->bi_opf = REQ_OP_READ|REQ_META;
     bio->bi_iter.bi_sector = pick.ptr.offset;
     bch2_bio_map(bio, n_ondisk, btree_bytes(c));
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 1855d08efd4b..08e067ca9e61 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -417,16 +417,13 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
     this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
 
     while (offset < bytes) {
-        unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
+        unsigned nr_iovecs = min_t(size_t, BIO_MAX_PAGES,
                        DIV_ROUND_UP(bytes, PAGE_SIZE));
         unsigned b = min_t(size_t, bytes - offset,
                    nr_iovecs << PAGE_SHIFT);
         struct ec_bio *ec_bio;
 
-        ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
-                               nr_iovecs,
-                               rw,
-                               GFP_KERNEL,
+        ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
                                &c->ec_bioset),
                       struct ec_bio, bio);
 
@@ -434,6 +431,9 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
         ec_bio->buf         = buf;
         ec_bio->idx         = idx;
 
+        bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
+        bio_set_op_attrs(&ec_bio->bio, rw, 0);
+
         ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
         ec_bio->bio.bi_end_io       = ec_block_endio;
         ec_bio->bio.bi_private      = cl;
@@ -759,6 +759,7 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
     struct btree_iter iter;
     struct bkey_s_c k;
     struct bkey_s_c_stripe s;
+    unsigned i;
     int ret;
 
     bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, idx),
@@ -775,7 +776,7 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
     }
 
     s = bkey_s_c_to_stripe(k);
-    for (unsigned i = 0; i < s.v->nr_blocks; i++)
+    for (i = 0; i < s.v->nr_blocks; i++)
         if (stripe_blockcount_get(s.v, i)) {
             struct printbuf buf = PRINTBUF;
diff --git a/fs/bcachefs/errcode.c b/fs/bcachefs/errcode.c
index dc906fc9176f..db71fc144061 100644
--- a/fs/bcachefs/errcode.c
+++ b/fs/bcachefs/errcode.c
@@ -3,8 +3,6 @@
 #include "bcachefs.h"
 #include "errcode.h"
 
-#include <linux/errname.h>
-
 static const char * const bch2_errcode_strs[] = {
 #define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = #err,
     BCH_ERRCODES()
@@ -31,7 +29,7 @@ const char *bch2_err_str(int err)
     if (err >= BCH_ERR_START)
         errstr = bch2_errcode_strs[err - BCH_ERR_START];
     else if (err)
-        errstr = errname(err);
+        errstr = "(Standard error code)";
     else
         errstr = "(No error)";
     return errstr ?: "(Invalid error)";
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index df2f317f5443..c138c029f0ba 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -77,11 +77,11 @@ static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
         if (!ca)
             continue;
 
-        bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
-                            REQ_OP_FLUSH,
-                            GFP_KERNEL,
+        bio = container_of(bio_alloc_bioset(GFP_KERNEL, 0,
                             &c->nocow_flush_bioset),
                    struct nocow_flush, bio);
+        bio_set_dev(&bio->bio, ca->disk_sb.bdev);
+        bio->bio.bi_opf = REQ_OP_FLUSH;
         bio->cl             = cl;
         bio->ca             = ca;
         bio->bio.bi_end_io  = nocow_flush_endio;
@@ -101,15 +101,6 @@ static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
     return 0;
 }
 
-static inline bool bio_full(struct bio *bio, unsigned len)
-{
-    if (bio->bi_vcnt >= bio->bi_max_vecs)
-        return true;
-    if (bio->bi_iter.bi_size > UINT_MAX - len)
-        return true;
-    return false;
-}
-
 static inline struct address_space *faults_disabled_mapping(void)
 {
     return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
@@ -377,13 +368,28 @@ static inline struct bch_page_state *bch2_page_state(struct page *page)
 /* for newly allocated pages: */
 static void __bch2_page_state_release(struct page *page)
 {
-    kfree(detach_page_private(page));
+    struct bch_page_state *s = __bch2_page_state(page);
+
+    if (!s)
+        return;
+
+    ClearPagePrivate(page);
+    set_page_private(page, 0);
+    put_page(page);
+    kfree(s);
 }
 
 static void bch2_page_state_release(struct page *page)
 {
-    EBUG_ON(!PageLocked(page));
-    __bch2_page_state_release(page);
+    struct bch_page_state *s = bch2_page_state(page);
+
+    if (!s)
+        return;
+
+    ClearPagePrivate(page);
+    set_page_private(page, 0);
+    put_page(page);
+    kfree(s);
 }
 
 /* for newly allocated pages: */
@@ -397,7 +403,13 @@ static struct bch_page_state *__bch2_page_state_create(struct page *page,
         return NULL;
 
     spin_lock_init(&s->lock);
-    attach_page_private(page, s);
+    /*
+     * migrate_page_move_mapping() assumes that pages with private data
+     * have their count elevated by 1.
+     */
+    get_page(page);
+    set_page_private(page, (unsigned long) s);
+    SetPagePrivate(page);
     return s;
 }
 
@@ -514,20 +526,22 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
 {
     pgoff_t index = start >> PAGE_SECTORS_SHIFT;
     pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
-    struct folio_batch fbatch;
-    unsigned i, j;
+    struct pagevec pvec;
 
     if (end <= start)
         return;
 
-    folio_batch_init(&fbatch);
+    pagevec_init(&pvec);
+
+    do {
+        unsigned nr_pages, i, j;
 
-    while (filemap_get_folios(inode->v.i_mapping,
-                  &index, end_index, &fbatch)) {
-        for (i = 0; i < folio_batch_count(&fbatch); i++) {
-            struct folio *folio = fbatch.folios[i];
-            u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
-            u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
+        nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
+                        &index, end_index);
+        for (i = 0; i < nr_pages; i++) {
+            struct page *page = pvec.pages[i];
+            u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
+            u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
             unsigned pg_offset = max(start, pg_start) - pg_start;
             unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
             struct bch_page_state *s;
@@ -536,8 +550,8 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
             BUG_ON(pg_offset >= PAGE_SECTORS);
             BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
 
-            folio_lock(folio);
-            s = bch2_page_state(&folio->page);
+            lock_page(page);
+            s = bch2_page_state(page);
 
             if (s) {
                 spin_lock(&s->lock);
@@ -546,11 +560,10 @@ static void mark_pagecache_unallocated(struct bch_inode_info *inode,
                 spin_unlock(&s->lock);
             }
 
-            folio_unlock(folio);
+            unlock_page(page);
         }
-        folio_batch_release(&fbatch);
-        cond_resched();
-    }
+        pagevec_release(&pvec);
+    } while (index <= end_index);
 }
 
 static void mark_pagecache_reserved(struct bch_inode_info *inode,
@@ -559,21 +572,23 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
     struct bch_fs *c = inode->v.i_sb->s_fs_info;
     pgoff_t index = start >> PAGE_SECTORS_SHIFT;
     pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
-    struct folio_batch fbatch;
+    struct pagevec pvec;
     s64 i_sectors_delta = 0;
-    unsigned i, j;
 
     if (end <= start)
         return;
 
-    folio_batch_init(&fbatch);
+    pagevec_init(&pvec);
 
-    while (filemap_get_folios(inode->v.i_mapping,
-                  &index, end_index, &fbatch)) {
-        for (i = 0; i < folio_batch_count(&fbatch); i++) {
-            struct folio *folio = fbatch.folios[i];
-            u64 pg_start = folio->index << PAGE_SECTORS_SHIFT;
-            u64 pg_end = (folio->index + 1) << PAGE_SECTORS_SHIFT;
+    do {
+        unsigned nr_pages, i, j;
+
+        nr_pages = pagevec_lookup_range(&pvec, inode->v.i_mapping,
+                        &index, end_index);
+        for (i = 0; i < nr_pages; i++) {
+            struct page *page = pvec.pages[i];
+            u64 pg_start = page->index << PAGE_SECTORS_SHIFT;
+            u64 pg_end = (page->index + 1) << PAGE_SECTORS_SHIFT;
             unsigned pg_offset = max(start, pg_start) - pg_start;
             unsigned pg_len = min(end, pg_end) - pg_offset - pg_start;
             struct bch_page_state *s;
@@ -582,8 +597,8 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
             BUG_ON(pg_offset >= PAGE_SECTORS);
             BUG_ON(pg_offset + pg_len > PAGE_SECTORS);
 
-            folio_lock(folio);
-            s = bch2_page_state(&folio->page);
+            lock_page(page);
+            s = bch2_page_state(page);
 
             if (s) {
                 spin_lock(&s->lock);
@@ -602,11 +617,10 @@ static void mark_pagecache_reserved(struct bch_inode_info *inode,
                 spin_unlock(&s->lock);
             }
 
-            folio_unlock(folio);
+            unlock_page(page);
         }
-        folio_batch_release(&fbatch);
-        cond_resched();
-    }
+        pagevec_release(&pvec);
+    } while (index <= end_index);
 
     i_sectors_acct(c, inode, NULL, i_sectors_delta);
 }
@@ -812,7 +826,7 @@ static void bch2_set_page_dirty(struct bch_fs *c,
     i_sectors_acct(c, inode, &res->quota, dirty_sectors);
 
     if (!PageDirty(page))
-        filemap_dirty_folio(inode->v.i_mapping, page_folio(page));
+        __set_page_dirty_nobuffers(page);
 }
 
 vm_fault_t bch2_page_fault(struct vm_fault *vmf)
@@ -915,31 +929,62 @@ out:
     return ret;
 }
 
-void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+void bch2_invalidatepage(struct page *page, unsigned int offset,
+             unsigned int length)
 {
-    if (offset || length < folio_size(folio))
+    if (offset || length < PAGE_SIZE)
         return;
 
-    bch2_clear_page_bits(&folio->page);
+    bch2_clear_page_bits(page);
 }
 
-bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
+int bch2_releasepage(struct page *page, gfp_t gfp_mask)
 {
-    if (folio_test_dirty(folio) || folio_test_writeback(folio))
-        return false;
+    if (PageDirty(page))
+        return 0;
 
-    bch2_clear_page_bits(&folio->page);
-    return true;
+    bch2_clear_page_bits(page);
+    return 1;
 }
 
+#ifdef CONFIG_MIGRATION
+int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
+              struct page *page, enum migrate_mode mode)
+{
+    int ret;
+
+    EBUG_ON(!PageLocked(page));
+    EBUG_ON(!PageLocked(newpage));
+
+    ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+    if (ret != MIGRATEPAGE_SUCCESS)
+        return ret;
+
+    if (PagePrivate(page)) {
+        ClearPagePrivate(page);
+        get_page(newpage);
+        set_page_private(newpage, page_private(page));
+        set_page_private(page, 0);
+        put_page(page);
+        SetPagePrivate(newpage);
+    }
+
+    if (mode != MIGRATE_SYNC_NO_COPY)
+        migrate_page_copy(newpage, page);
+    else
+        migrate_page_states(newpage, page);
+    return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
 /* readpage(s): */
 
 static void bch2_readpages_end_io(struct bio *bio)
 {
-    struct bvec_iter_all iter;
     struct bio_vec *bv;
+    unsigned i;
 
-    bio_for_each_segment_all(bv, bio, iter) {
+    bio_for_each_segment_all(bv, bio, i) {
         struct page *page = bv->bv_page;
 
         if (!bio->bi_status) {
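Backport note (editor's illustration, not part of the commit): `attach_page_private()` and `detach_page_private()` were added in v5.8; on 4.19 the same pattern is spelled out by hand, taking an extra page reference because page migration assumes pages with private data have an elevated refcount. A minimal sketch of that pairing; `attach_private()`/`detach_private()` are made-up helper names:

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Attach fs-private data to a page, holding a ref for migration's sake. */
static void attach_private(struct page *page, void *data)
{
	get_page(page);
	set_page_private(page, (unsigned long) data);
	SetPagePrivate(page);
}

/* Detach and return the private data, dropping the extra ref. */
static void *detach_private(struct page *page)
{
	void *data;

	if (!PagePrivate(page))
		return NULL;

	data = (void *) page_private(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	return data;
}
```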
@@ -958,29 +1003,31 @@ struct readpages_iter {
     struct address_space    *mapping;
     struct page     **pages;
     unsigned        nr_pages;
+    unsigned        nr_added;
     unsigned        idx;
     pgoff_t         offset;
 };
 
 static int readpages_iter_init(struct readpages_iter *iter,
-                   struct readahead_control *ractl)
+                   struct address_space *mapping,
+                   struct list_head *pages, unsigned nr_pages)
 {
-    unsigned i, nr_pages = readahead_count(ractl);
-
     memset(iter, 0, sizeof(*iter));
 
-    iter->mapping   = ractl->mapping;
-    iter->offset    = readahead_index(ractl);
-    iter->nr_pages  = nr_pages;
+    iter->mapping   = mapping;
+    iter->offset    = list_last_entry(pages, struct page, lru)->index;
 
     iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
     if (!iter->pages)
         return -ENOMEM;
 
-    nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
-    for (i = 0; i < nr_pages; i++) {
-        __bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
-        put_page(iter->pages[i]);
+    while (!list_empty(pages)) {
+        struct page *page = list_last_entry(pages, struct page, lru);
+
+        __bch2_page_state_create(page, __GFP_NOFAIL);
+
+        iter->pages[iter->nr_pages++] = page;
+        list_del(&page->lru);
     }
 
     return 0;
@@ -988,9 +1035,41 @@ static int readpages_iter_init(struct readpages_iter *iter,
 
 static inline struct page *readpage_iter_next(struct readpages_iter *iter)
 {
-    if (iter->idx >= iter->nr_pages)
-        return NULL;
+    struct page *page;
+    unsigned i;
+    int ret;
+
+    BUG_ON(iter->idx > iter->nr_added);
+    BUG_ON(iter->nr_added > iter->nr_pages);
+
+    if (iter->idx < iter->nr_added)
+        goto out;
+
+    while (1) {
+        if (iter->idx == iter->nr_pages)
+            return NULL;
+
+        ret = add_to_page_cache_lru_vec(iter->mapping,
+                        iter->pages + iter->nr_added,
+                        iter->nr_pages - iter->nr_added,
+                        iter->offset + iter->nr_added,
+                        GFP_NOFS);
+        if (ret > 0)
+            break;
+
+        page = iter->pages[iter->nr_added];
+        iter->idx++;
+        iter->nr_added++;
+
+        __bch2_page_state_release(page);
+        put_page(page);
+    }
+
+    iter->nr_added += ret;
+
+    for (i = iter->idx; i < iter->nr_added; i++)
+        put_page(iter->pages[i]);
+out:
     EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);
 
     return iter->pages[iter->idx];
@@ -1028,8 +1107,11 @@ static void readpage_bio_extend(struct readpages_iter *iter,
             if (!get_more)
                 break;
 
-            page = xa_load(&iter->mapping->i_pages, page_offset);
-            if (page && !xa_is_value(page))
+            rcu_read_lock();
+            page = radix_tree_lookup(&iter->mapping->i_pages, page_offset);
+            rcu_read_unlock();
+
+            if (page && !radix_tree_exceptional_entry(page))
                 break;
 
             page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
@@ -1164,9 +1246,10 @@ err:
     bch2_bkey_buf_exit(&sk, c);
 }
 
-void bch2_readahead(struct readahead_control *ractl)
+int bch2_readpages(struct file *file, struct address_space *mapping,
+           struct list_head *pages, unsigned nr_pages)
 {
-    struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
+    struct bch_inode_info *inode = to_bch_ei(mapping->host);
     struct bch_fs *c = inode->v.i_sb->s_fs_info;
     struct bch_io_opts opts;
     struct btree_trans trans;
@@ -1176,7 +1259,7 @@ void bch2_readahead(struct readahead_control *ractl)
 
     bch2_inode_opts_get(&opts, c, &inode->ei_inode);
 
-    ret = readpages_iter_init(&readpages_iter, ractl);
+    ret = readpages_iter_init(&readpages_iter, mapping, pages, nr_pages);
     BUG_ON(ret);
 
     bch2_trans_init(&trans, c, 0, 0);
@@ -1188,14 +1271,14 @@ void bch2_readahead(struct readahead_control *ractl)
         unsigned n = min_t(unsigned,
                    readpages_iter.nr_pages -
                    readpages_iter.idx,
-                   BIO_MAX_VECS);
+                   BIO_MAX_PAGES);
         struct bch_read_bio *rbio =
-            rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
-                           GFP_NOFS, &c->bio_read),
+            rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
                   opts);
 
         readpages_iter.idx++;
 
+        bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
         rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
         rbio->bio.bi_end_io = bch2_readpages_end_io;
         BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
@@ -1208,6 +1291,8 @@ void bch2_readahead(struct readahead_control *ractl)
     bch2_trans_exit(&trans);
     kfree(readpages_iter.pages);
+
+    return 0;
 }
 
 static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
@@ -1244,7 +1329,7 @@ static int bch2_read_single_page(struct page *page,
 
     bch2_inode_opts_get(&opts, c, &inode->ei_inode);
 
-    rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
+    rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
             opts);
     rbio->bio.bi_private = &done;
     rbio->bio.bi_end_io = bch2_read_single_page_end_io;
@@ -1262,14 +1347,20 @@ static int bch2_read_single_page(struct page *page,
     return 0;
 }
 
-int bch2_read_folio(struct file *file, struct folio *folio)
+int bch2_readpage(struct file *file, struct page *page)
 {
-    struct page *page = &folio->page;
-    int ret;
+    struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
+    struct bch_fs *c = inode->v.i_sb->s_fs_info;
+    struct bch_io_opts opts;
+    struct bch_read_bio *rbio;
 
-    ret = bch2_read_single_page(page, page->mapping);
-    folio_unlock(folio);
-    return bch2_err_class(ret);
+    bch2_inode_opts_get(&opts, c, &inode->ei_inode);
+
+    rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
+    rbio->bio.bi_end_io = bch2_readpages_end_io;
+
+    __bchfs_readpage(c, rbio, inode_inum(inode), page);
+    return 0;
 }
 
 /* writepages: */
@@ -1294,35 +1385,34 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
         container_of(op, struct bch_writepage_io, op);
     struct bch_fs *c = io->op.c;
     struct bio *bio = &io->op.wbio.bio;
-    struct bvec_iter_all iter;
     struct bio_vec *bvec;
-    unsigned i;
+    unsigned i, j;
 
     if (io->op.error) {
         set_bit(EI_INODE_ERROR, &io->inode->ei_flags);
 
-        bio_for_each_segment_all(bvec, bio, iter) {
+        bio_for_each_segment_all(bvec, bio, i) {
             struct bch_page_state *s;
 
             SetPageError(bvec->bv_page);
-            mapping_set_error(bvec->bv_page->mapping, -EIO);
+            mapping_set_error(io->inode->v.i_mapping, -EIO);
 
             s = __bch2_page_state(bvec->bv_page);
             spin_lock(&s->lock);
-            for (i = 0; i < PAGE_SECTORS; i++)
-                s->s[i].nr_replicas = 0;
+            for (j = 0; j < PAGE_SECTORS; j++)
+                s->s[j].nr_replicas = 0;
             spin_unlock(&s->lock);
         }
     }
 
     if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
-        bio_for_each_segment_all(bvec, bio, iter) {
+        bio_for_each_segment_all(bvec, bio, i) {
             struct bch_page_state *s;
 
             s = __bch2_page_state(bvec->bv_page);
             spin_lock(&s->lock);
-            for (i = 0; i < PAGE_SECTORS; i++)
-                s->s[i].nr_replicas = 0;
+            for (j = 0; j < PAGE_SECTORS; j++)
+                s->s[j].nr_replicas = 0;
             spin_unlock(&s->lock);
         }
     }
@@ -1346,7 +1436,7 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
      */
     i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
 
-    bio_for_each_segment_all(bvec, bio, iter) {
+    bio_for_each_segment_all(bvec, bio, i) {
         struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
 
         if (atomic_dec_and_test(&s->write_count))
@@ -1377,9 +1467,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
 {
     struct bch_write_op *op;
 
-    w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
-                          REQ_OP_WRITE,
-                          GFP_NOFS,
+    w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES,
                           &c->writepage_bioset),
                  struct bch_writepage_io, op.wbio.bio);
 
@@ -1499,9 +1587,9 @@ do_io:
 
     if (w->io &&
         (w->io->op.res.nr_replicas != nr_replicas_this_write ||
-         bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
+         bio_full(&w->io->op.wbio.bio) ||
          w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
-         (BIO_MAX_VECS * PAGE_SIZE) ||
+         (BIO_MAX_PAGES * PAGE_SIZE) ||
          bio_end_sector(&w->io->op.wbio.bio) != sector))
         bch2_writepage_do_io(w);
 
@@ -1520,7 +1608,7 @@ do_io:
                    round_up(i_size, block_bytes(c)) &&
                    !test_bit(BCH_FS_EMERGENCY_RO, &c->flags),
                    "writing past i_size: %llu > %llu (unrounded %llu)\n",
-                   bio_end_sector(&w->io->op.wbio.bio) << 9,
+                   (u64) bio_end_sector(&w->io->op.wbio.bio) << 9,
                    round_up(i_size, block_bytes(c)),
                    i_size);
 
@@ -1556,7 +1644,7 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc
 /* buffered writes: */
 
 int bch2_write_begin(struct file *file, struct address_space *mapping,
-             loff_t pos, unsigned len,
+             loff_t pos, unsigned len, unsigned flags,
              struct page **pagep, void **fsdata)
 {
     struct bch_inode_info *inode = to_bch_ei(mapping->host);
@@ -1576,7 +1664,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
 
     bch2_pagecache_add_get(inode);
 
-    page = grab_cache_page_write_begin(mapping, index);
+    page = grab_cache_page_write_begin(mapping, index, flags);
     if (!page)
         goto err_unlock;
 
@@ -1706,7 +1794,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
     bch2_page_reservation_init(c, inode, &res);
 
     for (i = 0; i < nr_pages; i++) {
-        pages[i] = grab_cache_page_write_begin(mapping, index + i);
+        pages[i] = grab_cache_page_write_begin(mapping, index + i, 0);
         if (!pages[i]) {
             nr_pages = i;
             if (!i) {
@@ -1778,8 +1866,8 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
         unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
         unsigned pg_len = min_t(unsigned, reserved - copied,
                     PAGE_SIZE - pg_offset);
-        unsigned pg_copied = copy_page_from_iter_atomic(page,
-                        pg_offset, pg_len, iter);
+        unsigned pg_copied = iov_iter_copy_from_user_atomic(page,
+                        iter, pg_offset, pg_len);
 
         if (!pg_copied)
             break;
@@ -1792,6 +1880,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
         }
 
         flush_dcache_page(page);
+        iov_iter_advance(iter, pg_copied);
         copied += pg_copied;
 
         if (pg_copied != pg_len)
            break;
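Backport note (editor's illustration, not part of the commit): on 4.19, `iov_iter_copy_from_user_atomic()` does not advance the iterator, so the caller must call `iov_iter_advance()` by hand after a successful copy; the newer `copy_page_from_iter_atomic()` folds that in. A minimal sketch of the old contract; `copy_one_page()` is a made-up helper name:

```c
#include <linux/uio.h>
#include <linux/highmem.h>

/* Copy up to len bytes from iter into page; returns bytes copied. */
static size_t copy_one_page(struct page *page, unsigned long offset,
                            size_t len, struct iov_iter *iter)
{
	size_t copied = iov_iter_copy_from_user_atomic(page, iter, offset, len);

	if (copied) {
		flush_dcache_page(page);
		iov_iter_advance(iter, copied);  /* not done by the copy itself */
	}
	return copied;
}
```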
@@ -1861,11 +1950,11 @@ again:
          * to check that the address is actually valid, when atomic
          * usercopies are used, below.
          */
-        if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
+        if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
             bytes = min_t(unsigned long, iov_iter_count(iter),
                       PAGE_SIZE - offset);
 
-            if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
+            if (unlikely(iov_iter_fault_in_readable(iter, bytes))) {
                 ret = -EFAULT;
                 break;
             }
@@ -1909,6 +1998,18 @@ again:
 
 /* O_DIRECT reads */
 
+static void bio_release_pages(struct bio *bio, bool mark_dirty)
+{
+    struct bio_vec *bvec;
+    unsigned i;
+
+    bio_for_each_segment_all(bvec, bio, i) {
+        if (mark_dirty && !PageCompound(bvec->bv_page))
+            set_page_dirty_lock(bvec->bv_page);
+        put_page(bvec->bv_page);
+    }
+}
+
 static void bio_check_or_release(struct bio *bio, bool check_dirty)
 {
     if (check_dirty) {
@@ -1923,7 +2024,7 @@ static void bch2_dio_read_complete(struct closure *cl)
 {
     struct dio_read *dio = container_of(cl, struct dio_read, cl);
 
-    dio->req->ki_complete(dio->req, dio->ret);
+    dio->req->ki_complete(dio->req, dio->ret, 0);
     bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
 }
 
@@ -1973,10 +2074,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
     iter->count -= shorten;
 
-    bio = bio_alloc_bioset(NULL,
-                   bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
-                   REQ_OP_READ,
-                   GFP_KERNEL,
+    bio = bio_alloc_bioset(GFP_KERNEL,
+                   iov_iter_npages(iter, BIO_MAX_PAGES),
                    &c->dio_read_bioset);
 
     bio->bi_end_io = bch2_direct_IO_read_endio;
@@ -2010,10 +2109,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
         goto start;
     while (iter->count) {
-        bio = bio_alloc_bioset(NULL,
-                       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
-                       REQ_OP_READ,
-                       GFP_KERNEL,
+        bio = bio_alloc_bioset(GFP_KERNEL,
+                       iov_iter_npages(iter, BIO_MAX_PAGES),
                        &c->bio_read);
         bio->bi_end_io = bch2_direct_IO_read_split_endio;
 start:
@@ -2239,7 +2336,7 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio)
     ret = bch2_err_class(ret);
 
     if (!sync) {
-        req->ki_complete(req, ret);
+        req->ki_complete(req, ret, 0);
         ret = -EIOCBQUEUED;
     }
     return ret;
@@ -2251,8 +2348,8 @@ static __always_inline void bch2_dio_write_end(struct dio_write *dio)
     struct kiocb *req = dio->req;
     struct bch_inode_info *inode = dio->inode;
     struct bio *bio = &dio->op.wbio.bio;
-    struct bvec_iter_all iter;
     struct bio_vec *bv;
+    unsigned i;
 
     req->ki_pos += (u64) dio->op.written << 9;
     dio->written += dio->op.written;
@@ -2271,9 +2368,8 @@ static __always_inline void bch2_dio_write_end(struct dio_write *dio)
         mutex_unlock(&inode->ei_quota_lock);
     }
 
-    if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
-        bio_for_each_segment_all(bv, bio, iter)
-            put_page(bv->bv_page);
+    bio_for_each_segment_all(bv, bio, i)
+        put_page(bv->bv_page);
 
     if (unlikely(dio->op.error))
         set_bit(EI_INODE_ERROR, &inode->ei_flags);
@@ -2385,18 +2481,18 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
         if (likely(!dio->iter.count) || dio->op.error)
             break;
 
-        bio_reset(bio, NULL, REQ_OP_WRITE);
+        bio_reset(bio);
     }
 out:
     return bch2_dio_write_done(dio);
 err:
     dio->op.error = ret;
 
-    if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
-        struct bvec_iter_all iter;
+    if (true) {
         struct bio_vec *bv;
+        unsigned i;
 
-        bio_for_each_segment_all(bv, bio, iter)
+        bio_for_each_segment_all(bv, bio, i)
             put_page(bv->bv_page);
     }
 
@@ -2408,13 +2504,13 @@ static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
 {
     struct mm_struct *mm = dio->mm;
 
-    bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
+    bio_reset(&dio->op.wbio.bio);
 
     if (mm)
-        kthread_use_mm(mm);
+        use_mm(mm);
     bch2_dio_write_loop(dio);
     if (mm)
-        kthread_unuse_mm(mm);
+        unuse_mm(mm);
 }
 
 static void bch2_dio_write_loop_async(struct bch_write_op *op)
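Backport note (editor's illustration, not part of the commit): `kthread_use_mm()` replaced `use_mm()` in v5.8; on 4.19 a worker thread borrows the submitter's mm via `use_mm()`/`unuse_mm()` from `<linux/mmu_context.h>`, and `->ki_complete()` still takes a third (`ret2`) argument. A rough sketch of both old conventions together; `complete_from_worker()` and its callback are made-up names:

```c
#include <linux/mmu_context.h>
#include <linux/fs.h>

/* Run deferred aio work with the submitter's mm, then complete the kiocb. */
static void complete_from_worker(struct kiocb *req, long ret,
                                 struct mm_struct *mm,
                                 void (*work)(void *), void *arg)
{
	if (mm)
		use_mm(mm);       /* old name for kthread_use_mm() */
	work(arg);
	if (mm)
		unuse_mm(mm);

	req->ki_complete(req, ret, 0);  /* 4.19: three-argument form */
}
```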
@@ -2472,10 +2568,8 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
         locked = false;
     }
 
-    bio = bio_alloc_bioset(NULL,
-                   bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
-                   REQ_OP_WRITE,
-                   GFP_KERNEL,
+    bio = bio_alloc_bioset(GFP_KERNEL,
+                   iov_iter_npages(iter, BIO_MAX_PAGES),
                    &c->dio_write_bioset);
     dio = container_of(bio, struct dio_write, op.wbio.bio);
     dio->req        = req;
@@ -2485,7 +2579,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
     dio->loop       = false;
     dio->extending      = extending;
     dio->sync       = is_sync_kiocb(req) || extending;
-    dio->flush      = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
+    dio->flush      = (req->ki_flags & IOCB_DSYNC) && !c->opts.journal_flush_disabled;
     dio->free_iov       = false;
     dio->quota_res.sectors  = 0;
     dio->written        = 0;
@@ -2715,7 +2809,7 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
      * redirty the full page:
      */
     page_mkclean(page);
-    filemap_dirty_folio(mapping, page_folio(page));
+    __set_page_dirty_nobuffers(page);
unlock:
     unlock_page(page);
     put_page(page);
@@ -2762,7 +2856,7 @@ static int bch2_extend(struct user_namespace *mnt_userns,
 
     truncate_setsize(&inode->v, iattr->ia_size);
 
-    return bch2_setattr_nonsize(mnt_userns, inode, iattr);
+    return bch2_setattr_nonsize(inode, iattr);
 }
 
 static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
@@ -2886,7 +2980,7 @@ int bch2_truncate(struct user_namespace *mnt_userns,
     ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL, 0);
     mutex_unlock(&inode->ei_update_lock);
 
-    ret = bch2_setattr_nonsize(mnt_userns, inode, iattr);
+    ret = bch2_setattr_nonsize(inode, iattr);
err:
     bch2_pagecache_block_put(inode);
     return bch2_err_class(ret);
@@ -3287,10 +3381,6 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
     inode_dio_wait(&inode->v);
     bch2_pagecache_block_get(inode);
 
-    ret = file_modified(file);
-    if (ret)
-        goto err;
-
     if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
         ret = bchfs_fallocate(inode, mode, offset, len);
     else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
@@ -3301,7 +3391,7 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
         ret = bchfs_fcollapse_finsert(inode, offset, len, false);
     else
         ret = -EOPNOTSUPP;
-err:
+
     bch2_pagecache_block_put(inode);
     inode_unlock(&inode->v);
     bch2_write_ref_put(c, BCH_WRITE_REF_fallocate);
@@ -3362,6 +3452,235 @@ err:
     return bch2_quota_reservation_add(c, inode, res, sectors, true);
 }
 
+static int generic_access_check_limits(struct file *file, loff_t pos,
+                       loff_t *count)
+{
+    struct inode *inode = file->f_mapping->host;
+    loff_t max_size = inode->i_sb->s_maxbytes;
+
+    if (!(file->f_flags & O_LARGEFILE))
+        max_size = MAX_NON_LFS;
+
+    if (unlikely(pos >= max_size))
+        return -EFBIG;
+    *count = min(*count, max_size - pos);
+    return 0;
+}
+
+static int generic_write_check_limits(struct file *file, loff_t pos,
+                      loff_t *count)
+{
+    loff_t limit = rlimit(RLIMIT_FSIZE);
+
+    if (limit != RLIM_INFINITY) {
+        if (pos >= limit) {
+            send_sig(SIGXFSZ, current, 0);
+            return -EFBIG;
+        }
+        *count = min(*count, limit - pos);
+    }
+
+    return generic_access_check_limits(file, pos, count);
+}
+
+static int generic_remap_checks(struct file *file_in, loff_t pos_in,
+                struct file *file_out, loff_t pos_out,
+                loff_t *req_count, unsigned int remap_flags)
+{
+    struct inode *inode_in = file_in->f_mapping->host;
+    struct inode *inode_out = file_out->f_mapping->host;
+    uint64_t count = *req_count;
+    uint64_t bcount;
+    loff_t size_in, size_out;
+    loff_t bs = inode_out->i_sb->s_blocksize;
+    int ret;
+
+    /* The start of both ranges must be aligned to an fs block. */
+    if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
+        return -EINVAL;
+
+    /* Ensure offsets don't wrap. */
+    if (pos_in + count < pos_in || pos_out + count < pos_out)
+        return -EINVAL;
+
+    size_in = i_size_read(inode_in);
+    size_out = i_size_read(inode_out);
+
+    /* Dedupe requires both ranges to be within EOF. */
+    if ((remap_flags & REMAP_FILE_DEDUP) &&
+        (pos_in >= size_in || pos_in + count > size_in ||
+         pos_out >= size_out || pos_out + count > size_out))
+        return -EINVAL;
+
+    /* Ensure the infile range is within the infile. */
+    if (pos_in >= size_in)
+        return -EINVAL;
+    count = min(count, size_in - (uint64_t)pos_in);
+
+    ret = generic_access_check_limits(file_in, pos_in, &count);
+    if (ret)
+        return ret;
+
+    ret = generic_write_check_limits(file_out, pos_out, &count);
+    if (ret)
+        return ret;
+
+    /*
+     * If the user wanted us to link to the infile's EOF, round up to the
+     * next block boundary for this check.
+     *
+     * Otherwise, make sure the count is also block-aligned, having
+     * already confirmed the starting offsets' block alignment.
+     */
+    if (pos_in + count == size_in) {
+        bcount = ALIGN(size_in, bs) - pos_in;
+    } else {
+        if (!IS_ALIGNED(count, bs))
+            count = ALIGN_DOWN(count, bs);
+        bcount = count;
+    }
+
+    /* Don't allow overlapped cloning within the same file. */
+    if (inode_in == inode_out &&
+        pos_out + bcount > pos_in &&
+        pos_out < pos_in + bcount)
+        return -EINVAL;
+
+    /*
+     * We shortened the request but the caller can't deal with that, so
+     * bounce the request back to userspace.
+     */
+    if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
+        return -EINVAL;
+
+    *req_count = count;
+    return 0;
+}
+
+static int generic_remap_check_len(struct inode *inode_in,
+                   struct inode *inode_out,
+                   loff_t pos_out,
+                   loff_t *len,
+                   unsigned int remap_flags)
+{
+    u64 blkmask = i_blocksize(inode_in) - 1;
+    loff_t new_len = *len;
+
+    if ((*len & blkmask) == 0)
+        return 0;
+
+    if ((remap_flags & REMAP_FILE_DEDUP) ||
+        pos_out + *len < i_size_read(inode_out))
+        new_len &= ~blkmask;
+
+    if (new_len == *len)
+        return 0;
+
+    if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
+        *len = new_len;
+        return 0;
+    }
+
+    return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
+}
+
+static int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+                     struct file *file_out, loff_t pos_out,
+                     loff_t *len, unsigned int remap_flags)
+{
+    struct inode *inode_in = file_inode(file_in);
+    struct inode *inode_out = file_inode(file_out);
+    bool same_inode = (inode_in == inode_out);
+    int ret;
+
+    /* Don't touch certain kinds of inodes */
+    if (IS_IMMUTABLE(inode_out))
+        return -EPERM;
+
+    if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
+        return -ETXTBSY;
+
+    /* Don't reflink dirs, pipes, sockets... */
+    if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
+        return -EISDIR;
+    if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
+        return -EINVAL;
+
+    /* Zero length dedupe exits immediately; reflink goes to EOF. */
+    if (*len == 0) {
+        loff_t isize = i_size_read(inode_in);
+
+        if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
+            return 0;
+        if (pos_in > isize)
+            return -EINVAL;
+        *len = isize - pos_in;
+        if (*len == 0)
+            return 0;
+    }
+
+    /* Check that we don't violate system file offset limits. */
+    ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
+                   remap_flags);
+    if (ret)
+        return ret;
+
+    /* Wait for the completion of any pending IOs on both files */
+    inode_dio_wait(inode_in);
+    if (!same_inode)
+        inode_dio_wait(inode_out);
+
+    ret = filemap_write_and_wait_range(inode_in->i_mapping,
+                       pos_in, pos_in + *len - 1);
+    if (ret)
+        return ret;
+
+    ret = filemap_write_and_wait_range(inode_out->i_mapping,
+                       pos_out, pos_out + *len - 1);
+    if (ret)
+        return ret;
+
+    /*
+     * Check that the extents are the same.
+     */
+    if (remap_flags & REMAP_FILE_DEDUP) {
+        bool        is_same = false;
+
+        ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
+                inode_out, pos_out, *len, &is_same);
+        if (ret)
+            return ret;
+        if (!is_same)
+            return -EBADE;
+    }
+
+    ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
+                      remap_flags);
+    if (ret)
+        return ret;
+
+    /* If can't alter the file contents, we're done. */
+    if (!(remap_flags & REMAP_FILE_DEDUP)) {
+        /* Update the timestamps, since we can alter file contents. */
+        if (!(file_out->f_mode & FMODE_NOCMTIME)) {
+            ret = file_update_time(file_out);
+            if (ret)
+                return ret;
+        }
+
+        /*
+         * Clear the security bits if the process is not being run by
+         * root. This keeps people from modifying setuid and setgid
+         * binaries.
+         */
+        ret = file_remove_privs(file_out);
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
                  struct file *file_dst, loff_t pos_dst,
                  loff_t len, unsigned remap_flags)
@@ -3448,9 +3767,9 @@ err:
 
 /* fseek: */
 
-static int folio_data_offset(struct folio *folio, unsigned offset)
+static int page_data_offset(struct page *page, unsigned offset)
 {
-    struct bch_page_state *s = bch2_page_state(&folio->page);
+    struct bch_page_state *s = bch2_page_state(page);
     unsigned i;
 
     if (s)
@@ -3465,38 +3784,36 @@ static loff_t bch2_seek_pagecache_data(struct inode *vinode,
                        loff_t start_offset,
                        loff_t end_offset)
 {
-    struct folio_batch fbatch;
+    struct address_space *mapping = vinode->i_mapping;
+    struct page *page;
     pgoff_t start_index = start_offset >> PAGE_SHIFT;
     pgoff_t end_index   = end_offset >> PAGE_SHIFT;
     pgoff_t index       = start_index;
-    unsigned i;
     loff_t ret;
     int offset;
 
-    folio_batch_init(&fbatch);
-
-    while (filemap_get_folios(vinode->i_mapping,
-                  &index, end_index, &fbatch)) {
-        for (i = 0; i < folio_batch_count(&fbatch); i++) {
-            struct folio *folio = fbatch.folios[i];
+    while (index <= end_index) {
+        if (find_get_pages_range(mapping, &index, end_index, 1, &page)) {
+            lock_page(page);
 
-            folio_lock(folio);
-            offset = folio_data_offset(folio,
-                    folio->index == start_index
+            offset = page_data_offset(page,
+                    page->index == start_index
                     ? start_offset & (PAGE_SIZE - 1)
                     : 0);
             if (offset >= 0) {
-                ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
+                ret = clamp(((loff_t) page->index << PAGE_SHIFT) +
                         offset,
                         start_offset, end_offset);
-                folio_unlock(folio);
-                folio_batch_release(&fbatch);
+                unlock_page(page);
+                put_page(page);
                 return ret;
             }
-            folio_unlock(folio);
+
+            unlock_page(page);
+            put_page(page);
+        } else {
+            break;
         }
-        folio_batch_release(&fbatch);
-        cond_resched();
     }
 
     return end_offset;
diff --git a/fs/bcachefs/fs-io.h b/fs/bcachefs/fs-io.h
index a8835298613a..07bc9e0ea441 100644
--- a/fs/bcachefs/fs-io.h
+++ b/fs/bcachefs/fs-io.h
@@ -15,13 +15,14 @@ int __must_check bch2_write_inode_size(struct bch_fs *,
                        struct bch_inode_info *,
                        loff_t, unsigned);
 
-int bch2_read_folio(struct file *, struct folio *);
+int bch2_readpage(struct file *, struct page *);
 
 int bch2_writepages(struct address_space *, struct writeback_control *);
-void bch2_readahead(struct readahead_control *);
+int bch2_readpages(struct file *, struct address_space *,
+           struct list_head *, unsigned);
 
 int bch2_write_begin(struct file *, struct address_space *, loff_t,
-             unsigned, struct page **, void **);
+             unsigned, unsigned, struct page **, void **);
 int bch2_write_end(struct file *, struct address_space *, loff_t,
            unsigned, unsigned, struct page *, void *);
 
@@ -34,6 +35,10 @@ int bch2_truncate(struct user_namespace *,
           struct bch_inode_info *, struct iattr *);
 long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t);
 
+#define REMAP_FILE_ADVISORY     (0)
+#define REMAP_FILE_DEDUP        (1 << 0)
+#define REMAP_FILE_CAN_SHORTEN      (1 << 1)
+
 loff_t bch2_remap_file_range(struct file *, loff_t, struct file *,
                  loff_t, loff_t, unsigned);
 
@@ -41,8 +46,10 @@ loff_t bch2_llseek(struct file *, loff_t, int);
 vm_fault_t bch2_page_fault(struct vm_fault *);
 vm_fault_t bch2_page_mkwrite(struct vm_fault *);
 
-void bch2_invalidate_folio(struct folio *, size_t, size_t);
-bool bch2_release_folio(struct folio *, gfp_t);
+void bch2_invalidatepage(struct page *, unsigned int, unsigned int);
+int bch2_releasepage(struct page *, gfp_t);
+int bch2_migrate_page(struct address_space *, struct page *,
+              struct page *, enum migrate_mode);
 
 void bch2_fs_fsio_exit(struct bch_fs *);
 int bch2_fs_fsio_init(struct bch_fs *);
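Backport note (editor's illustration, not part of the commit): the `REMAP_FILE_*` flags and `generic_remap_file_range_prep()` only landed in v4.20, which is why the patch carries private copies in fs-io.c and defines the flags in fs-io.h. A `->remap_file_range()` implementation is expected to call the prep helper roughly as below; `example_remap_file_range()` and `do_remap_extents` are made-up names for the fs-specific work:

```c
static loff_t example_remap_file_range(struct file *file_src, loff_t pos_src,
                                       struct file *file_dst, loff_t pos_dst,
                                       loff_t len, unsigned remap_flags)
{
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_CAN_SHORTEN))
		return -EINVAL;

	/* Validates alignment, EOF handling, and may shorten len. */
	ret = generic_remap_file_range_prep(file_src, pos_src,
					    file_dst, pos_dst,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		return ret;

	/* do_remap_extents(...): fs-specific extent remapping goes here */
	return len;
}
```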
page *, enum migrate_mode); void bch2_fs_fsio_exit(struct bch_fs *); int bch2_fs_fsio_init(struct bch_fs *); diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c index 571b4dca4d39..5b323e4c3b43 100644 --- a/fs/bcachefs/fs-ioctl.c +++ b/fs/bcachefs/fs-ioctl.c @@ -93,7 +93,7 @@ static int bch2_ioc_setflags(struct bch_fs *c, return ret; inode_lock(&inode->v); - if (!inode_owner_or_capable(file_mnt_user_ns(file), &inode->v)) { + if (!inode_owner_or_capable(&inode->v)) { ret = -EACCES; goto setflags_out; } @@ -172,7 +172,7 @@ static int bch2_ioc_fssetxattr(struct bch_fs *c, return ret; inode_lock(&inode->v); - if (!inode_owner_or_capable(file_mnt_user_ns(file), &inode->v)) { + if (!inode_owner_or_capable(&inode->v)) { ret = -EACCES; goto err; } @@ -284,20 +284,22 @@ static int bch2_ioc_goingdown(struct bch_fs *c, u32 __user *arg) down_write(&c->vfs_sb->s_umount); switch (flags) { - case FSOP_GOING_FLAGS_DEFAULT: - ret = freeze_bdev(c->vfs_sb->s_bdev); + case FSOP_GOING_FLAGS_DEFAULT: { + struct super_block *sb = freeze_bdev(c->vfs_sb->s_bdev); if (ret) goto err; - bch2_journal_flush(&c->journal); - c->vfs_sb->s_flags |= SB_RDONLY; - bch2_fs_emergency_read_only(c); - thaw_bdev(c->vfs_sb->s_bdev); + if (sb && !IS_ERR(sb)) { + bch2_journal_flush(&c->journal); + c->vfs_sb->s_flags |= SB_RDONLY; + bch2_fs_emergency_read_only(c); + thaw_bdev(c->vfs_sb->s_bdev, sb); + } break; + } case FSOP_GOING_FLAGS_LOGFLUSH: bch2_journal_flush(&c->journal); - fallthrough; case FSOP_GOING_FLAGS_NOLOGFLUSH: c->vfs_sb->s_flags |= SB_RDONLY; @@ -393,8 +395,7 @@ retry: goto err3; } - error = inode_permission(file_mnt_user_ns(filp), - dir, MAY_WRITE | MAY_EXEC); + error = inode_permission(dir, MAY_WRITE | MAY_EXEC); if (error) goto err3; @@ -409,7 +410,7 @@ retry: !arg.src_ptr) snapshot_src.subvol = to_bch_ei(dir)->ei_inode.bi_subvol; - inode = __bch2_create(file_mnt_user_ns(filp), to_bch_ei(dir), + inode = __bch2_create(NULL, to_bch_ei(dir), dst_dentry, arg.mode|S_IFDIR, 0, snapshot_src, create_flags); error = PTR_ERR_OR_ZERO(inode); @@ -461,7 +462,6 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp, if (ret) goto err; - fsnotify_rmdir(dir, path.dentry); d_delete(path.dentry); err: path_put(&path); diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index fafd64509f6b..246316d59ccc 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -222,6 +222,7 @@ __bch2_create(struct user_namespace *mnt_userns, unsigned flags) { struct bch_fs *c = dir->v.i_sb->s_fs_info; + struct user_namespace *ns = dir->v.i_sb->s_user_ns; struct btree_trans trans; struct bch_inode_unpacked dir_u; struct bch_inode_info *inode, *old; @@ -262,8 +263,8 @@ retry: inode_inum(dir), &dir_u, &inode_u, !(flags & BCH_CREATE_TMPFILE) ? 
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index fafd64509f6b..246316d59ccc 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -222,6 +222,7 @@ __bch2_create(struct user_namespace *mnt_userns,
 	      unsigned flags)
 {
 	struct bch_fs *c = dir->v.i_sb->s_fs_info;
+	struct user_namespace *ns = dir->v.i_sb->s_user_ns;
 	struct btree_trans trans;
 	struct bch_inode_unpacked dir_u;
 	struct bch_inode_info *inode, *old;
@@ -262,8 +263,8 @@ retry:
 			inode_inum(dir), &dir_u, &inode_u,
 			!(flags & BCH_CREATE_TMPFILE)
 				? &dentry->d_name : NULL,
-			from_kuid(mnt_userns, current_fsuid()),
-			from_kgid(mnt_userns, current_fsgid()),
+			from_kuid(ns, current_fsuid()),
+			from_kgid(ns, current_fsgid()),
 			mode, rdev,
 			default_acl, acl, snapshot_src, flags) ?:
 		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1,
@@ -370,12 +371,11 @@ static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
 	return d_splice_alias(vinode, dentry);
 }

-static int bch2_mknod(struct user_namespace *mnt_userns,
-		      struct inode *vdir, struct dentry *dentry,
+static int bch2_mknod(struct inode *vdir, struct dentry *dentry,
 		      umode_t mode, dev_t rdev)
 {
 	struct bch_inode_info *inode =
-		__bch2_create(mnt_userns, to_bch_ei(vdir), dentry, mode, rdev,
+		__bch2_create(NULL, to_bch_ei(vdir), dentry, mode, rdev,
 			      (subvol_inum) { 0 }, 0);

 	if (IS_ERR(inode))
@@ -385,11 +385,10 @@ static int bch2_mknod(struct user_namespace *mnt_userns,
 	return 0;
 }

-static int bch2_create(struct user_namespace *mnt_userns,
-		       struct inode *vdir, struct dentry *dentry,
+static int bch2_create(struct inode *vdir, struct dentry *dentry,
 		       umode_t mode, bool excl)
 {
-	return bch2_mknod(mnt_userns, vdir, dentry, mode|S_IFREG, 0);
+	return bch2_mknod(vdir, dentry, mode|S_IFREG, 0);
 }

 static int __bch2_link(struct bch_fs *c,
@@ -486,15 +485,14 @@ static int bch2_unlink(struct inode *vdir, struct dentry *dentry)
 	return __bch2_unlink(vdir, dentry, false);
 }

-static int bch2_symlink(struct user_namespace *mnt_userns,
-			struct inode *vdir, struct dentry *dentry,
+static int bch2_symlink(struct inode *vdir, struct dentry *dentry,
 			const char *symname)
 {
 	struct bch_fs *c = vdir->i_sb->s_fs_info;
 	struct bch_inode_info *dir = to_bch_ei(vdir), *inode;
 	int ret;

-	inode = __bch2_create(mnt_userns, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
+	inode = __bch2_create(NULL, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
 			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
 	if (IS_ERR(inode))
 		return bch2_err_class(PTR_ERR(inode));
@@ -521,14 +519,12 @@ err:
 	return ret;
 }

-static int bch2_mkdir(struct user_namespace *mnt_userns,
-		      struct inode *vdir, struct dentry *dentry, umode_t mode)
+static int bch2_mkdir(struct inode *vdir, struct dentry *dentry, umode_t mode)
 {
-	return bch2_mknod(mnt_userns, vdir, dentry, mode|S_IFDIR, 0);
+	return bch2_mknod(vdir, dentry, mode|S_IFDIR, 0);
 }

-static int bch2_rename2(struct user_namespace *mnt_userns,
-			struct inode *src_vdir, struct dentry *src_dentry,
+static int bch2_rename2(struct inode *src_vdir, struct dentry *src_dentry,
 			struct inode *dst_vdir, struct dentry *dst_dentry,
 			unsigned flags)
 {
@@ -634,8 +630,7 @@ err:
 	return ret;
 }

-static void bch2_setattr_copy(struct user_namespace *mnt_userns,
-			      struct bch_inode_info *inode,
+static void bch2_setattr_copy(struct bch_inode_info *inode,
 			      struct bch_inode_unpacked *bi,
 			      struct iattr *attr)
 {
@@ -643,9 +638,9 @@ static void bch2_setattr_copy(struct user_namespace *mnt_userns,
 	unsigned int ia_valid = attr->ia_valid;

 	if (ia_valid & ATTR_UID)
-		bi->bi_uid = from_kuid(mnt_userns, attr->ia_uid);
+		bi->bi_uid = from_kuid(c->vfs_sb->s_user_ns, attr->ia_uid);
 	if (ia_valid & ATTR_GID)
-		bi->bi_gid = from_kgid(mnt_userns, attr->ia_gid);
+		bi->bi_gid = from_kgid(c->vfs_sb->s_user_ns, attr->ia_gid);

 	if (ia_valid & ATTR_SIZE)
 		bi->bi_size = attr->ia_size;
@@ -664,14 +659,13 @@ static void bch2_setattr_copy(struct user_namespace *mnt_userns,
 			: inode->v.i_gid;

 		if (!in_group_p(gid) &&
-		    !capable_wrt_inode_uidgid(mnt_userns, &inode->v, CAP_FSETID))
+		    !capable_wrt_inode_uidgid(&inode->v, CAP_FSETID))
 			mode &= ~S_ISGID;
 		bi->bi_mode = mode;
 	}
 }

-int bch2_setattr_nonsize(struct user_namespace *mnt_userns,
-			 struct bch_inode_info *inode,
+int bch2_setattr_nonsize(struct bch_inode_info *inode,
 			 struct iattr *attr)
 {
 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
@@ -687,10 +681,10 @@ int bch2_setattr_nonsize(struct user_namespace *mnt_userns,
 	qid = inode->ei_qid;

 	if (attr->ia_valid & ATTR_UID)
-		qid.q[QTYP_USR] = from_kuid(mnt_userns, attr->ia_uid);
+		qid.q[QTYP_USR] = from_kuid(&init_user_ns, attr->ia_uid);

 	if (attr->ia_valid & ATTR_GID)
-		qid.q[QTYP_GRP] = from_kgid(mnt_userns, attr->ia_gid);
+		qid.q[QTYP_GRP] = from_kgid(&init_user_ns, attr->ia_gid);

 	ret = bch2_fs_quota_transfer(c, inode, qid, ~0,
 				     KEY_TYPE_QUOTA_PREALLOC);
@@ -708,7 +702,7 @@ retry:
 	if (ret)
 		goto btree_err;

-	bch2_setattr_copy(mnt_userns, inode, &inode_u, attr);
+	bch2_setattr_copy(inode, &inode_u, attr);

 	if (attr->ia_valid & ATTR_MODE) {
 		ret = bch2_acl_chmod(&trans, inode_inum(inode), &inode_u,
@@ -740,8 +734,7 @@ err:
 	return bch2_err_class(ret);
 }

-static int bch2_getattr(struct user_namespace *mnt_userns,
-			const struct path *path, struct kstat *stat,
+static int bch2_getattr(const struct path *path, struct kstat *stat,
 			u32 request_mask, unsigned query_flags)
 {
 	struct bch_inode_info *inode = to_bch_ei(d_inode(path->dentry));
@@ -781,37 +774,34 @@ static int bch2_getattr(struct user_namespace *mnt_userns,
 	return 0;
 }

-static int bch2_setattr(struct user_namespace *mnt_userns,
-			struct dentry *dentry, struct iattr *iattr)
+static int bch2_setattr(struct dentry *dentry, struct iattr *iattr)
 {
 	struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
 	int ret;

 	lockdep_assert_held(&inode->v.i_rwsem);

-	ret = setattr_prepare(mnt_userns, dentry, iattr);
+	ret = setattr_prepare(dentry, iattr);
 	if (ret)
 		return ret;

 	return iattr->ia_valid & ATTR_SIZE
-		? bch2_truncate(mnt_userns, inode, iattr)
-		: bch2_setattr_nonsize(mnt_userns, inode, iattr);
+		? bch2_truncate(NULL, inode, iattr)
+		: bch2_setattr_nonsize(inode, iattr);
 }

-static int bch2_tmpfile(struct user_namespace *mnt_userns,
-			struct inode *vdir, struct file *file, umode_t mode)
+static int bch2_tmpfile(struct inode *vdir, struct dentry *dentry, umode_t mode)
 {
 	struct bch_inode_info *inode =
-		__bch2_create(mnt_userns, to_bch_ei(vdir),
-			      file->f_path.dentry, mode, 0,
+		__bch2_create(NULL, to_bch_ei(vdir), dentry, mode, 0,
 			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);

 	if (IS_ERR(inode))
 		return bch2_err_class(PTR_ERR(inode));

-	d_mark_tmpfile(file, &inode->v);
-	d_instantiate(file->f_path.dentry, &inode->v);
-	return finish_open_simple(file, 0);
+	d_mark_tmpfile(dentry, &inode->v);
+	d_instantiate(dentry, &inode->v);
+	return 0;
 }

 static int bch2_fill_extent(struct bch_fs *c,
@@ -885,10 +875,6 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 	u32 snapshot;
 	int ret = 0;

-	ret = fiemap_prep(&ei->v, info, start, &len, FIEMAP_FLAG_SYNC);
-	if (ret)
-		return ret;
-
 	if (start + len < start)
 		return -EINVAL;

@@ -1004,6 +990,15 @@ static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
 	return bch2_readdir(c, inode_inum(inode), ctx);
 }

+static int bch2_clone_file_range(struct file *file_src, loff_t pos_src,
+				 struct file *file_dst, loff_t pos_dst,
+				 u64 len)
+{
+	return bch2_remap_file_range(file_src, pos_src,
+				     file_dst, pos_dst,
+				     len, 0);
+}
+
 static const struct file_operations bch_file_operations = {
 	.llseek		= bch2_llseek,
 	.read_iter	= bch2_read_iter,
@@ -1018,7 +1013,7 @@ static const struct file_operations bch_file_operations = {
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= bch2_compat_fs_ioctl,
 #endif
-	.remap_file_range = bch2_remap_file_range,
+	.clone_file_range = bch2_clone_file_range,
 };

 static const struct inode_operations bch_file_inode_operations = {
@@ -1085,17 +1080,17 @@ static const struct inode_operations bch_special_inode_operations = {
 };

 static const struct address_space_operations bch_address_space_operations = {
-	.read_folio	= bch2_read_folio,
+	.readpage	= bch2_readpage,
 	.writepages	= bch2_writepages,
-	.readahead	= bch2_readahead,
-	.dirty_folio	= filemap_dirty_folio,
+	.readpages	= bch2_readpages,
+	.set_page_dirty	= __set_page_dirty_nobuffers,
 	.write_begin	= bch2_write_begin,
 	.write_end	= bch2_write_end,
-	.invalidate_folio = bch2_invalidate_folio,
-	.release_folio	= bch2_release_folio,
+	.invalidatepage	= bch2_invalidatepage,
+	.releasepage	= bch2_releasepage,
 	.direct_IO	= noop_direct_IO,
 #ifdef CONFIG_MIGRATION
-	.migrate_folio	= filemap_migrate_folio,
+	.migratepage	= bch2_migrate_page,
 #endif
 	.error_remove_page = generic_error_remove_page,
 };
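Note: kernels before 4.20 have no ->remap_file_range(); clone and dedupe are separate file_operations hooks, which is why the patch defines the REMAP_FILE_* constants locally and routes clone through the bch2_clone_file_range() wrapper above. A hypothetical dedupe wrapper in the same style, purely illustrative (the patch only wires up clone, and the exact ->dedupe_file_range() prototype on 4.19 differs from this):

	static int bch2_dedupe_file_range_example(struct file *file_src, loff_t pos_src,
						  struct file *file_dst, loff_t pos_dst,
						  u64 len)
	{
		/* REMAP_FILE_DEDUP is the locally-defined flag from fs-io.h above */
		return bch2_remap_file_range(file_src, pos_src,
					     file_dst, pos_dst,
					     len, REMAP_FILE_DEDUP);
	}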
@@ -1572,14 +1567,13 @@ static int bch2_sync_fs(struct super_block *sb, int wait)
 static struct bch_fs *bch2_path_to_fs(const char *path)
 {
 	struct bch_fs *c;
-	dev_t dev;
-	int ret;
+	struct block_device *bdev = lookup_bdev(path);

-	ret = lookup_bdev(path, &dev);
-	if (ret)
-		return ERR_PTR(ret);
+	if (IS_ERR(bdev))
+		return ERR_CAST(bdev);

-	c = bch2_dev_to_fs(dev);
+	c = bch2_dev_to_fs(bdev->bd_dev);
+	bdput(bdev);
 	if (c)
 		closure_put(&c->cl);
 	return c ?: ERR_PTR(-ENOENT);
@@ -1844,8 +1838,6 @@ got_sb:
 	sb->s_xattr		= bch2_xattr_handlers;
 	sb->s_magic		= BCACHEFS_STATFS_MAGIC;
 	sb->s_time_gran		= c->sb.nsec_per_time_unit;
-	sb->s_time_min		= div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
-	sb->s_time_max		= div_s64(S64_MAX, c->sb.time_units_per_sec);
 	c->vfs_sb		= sb;
 	strscpy(sb->s_id, c->name, sizeof(sb->s_id));
@@ -1853,7 +1845,9 @@ got_sb:
 	if (ret)
 		goto err_put_super;

-	sb->s_bdi->ra_pages		= VM_READAHEAD_PAGES;
+	sb->s_bdi->congested_fn		= bch2_congested;
+	sb->s_bdi->congested_data	= c;
+	sb->s_bdi->ra_pages		= VM_MAX_READAHEAD * 1024 / PAGE_SIZE;

 	for_each_online_member(ca, c, i) {
 		struct block_device *bdev = ca->disk_sb.bdev;
@@ -1872,7 +1866,7 @@ got_sb:
 	sb->s_flags	|= SB_POSIXACL;
 #endif

-	sb->s_shrink.seeks = 0;
+	sb->s_shrink.seeks = 1;

 	vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
 	ret = PTR_ERR_OR_ZERO(vinode);
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
index cf0413534182..bc6614a9ecf1 100644
--- a/fs/bcachefs/fs.h
+++ b/fs/bcachefs/fs.h
@@ -184,8 +184,7 @@ void bch2_inode_update_after_write(struct btree_trans *,
 int __must_check bch2_write_inode(struct bch_fs *,
 				  struct bch_inode_info *,
 				  inode_set_fn, void *, unsigned);
-int bch2_setattr_nonsize(struct user_namespace *,
-			 struct bch_inode_info *,
+int bch2_setattr_nonsize(struct bch_inode_info *,
 			 struct iattr *);

 int __bch2_unlink(struct inode *, struct dentry *, bool);
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index ed2523ac2249..49b6427dd965 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -2180,8 +2180,8 @@ static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
 	if (inum < range_start || inum >= range_end)
 		return;

-	link = __inline_bsearch(&key, links->d, links->nr,
-				sizeof(links->d[0]), nlink_cmp);
+	link = bsearch(&key, links->d, links->nr,
+		       sizeof(links->d[0]), nlink_cmp);
 	if (!link)
 		return;
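Note: __inline_bsearch() does not exist on 4.19, so the fsck hunk above falls back to the out-of-line bsearch() from <linux/bsearch.h>. A self-contained sketch of that comparator shape (the struct and names here are stand-ins, not bcachefs's real nlink types):

	#include <linux/bsearch.h>

	struct nlink_example { u64 inum; u32 count; };

	static int nlink_cmp_example(const void *_l, const void *_r)
	{
		const struct nlink_example *l = _l, *r = _r;

		return l->inum < r->inum ? -1 : l->inum > r->inum ? 1 : 0;
	}

	/* usage:
	 *	struct nlink_example *link =
	 *		bsearch(&key, table, nr, sizeof(table[0]), nlink_cmp_example);
	 */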
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index c3da325a25c8..c5c600bd0d38 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -150,10 +150,10 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target)

 void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
 {
-	struct bvec_iter_all iter;
 	struct bio_vec *bv;
+	unsigned i;

-	bio_for_each_segment_all(bv, bio, iter)
+	bio_for_each_segment_all(bv, bio, i)
 		if (bv->bv_page != ZERO_PAGE(0))
 			mempool_free(bv->bv_page, &c->bio_bounce_pages);
 	bio->bi_vcnt = 0;
@@ -661,8 +661,8 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 		ca = bch_dev_bkey_exists(c, ptr->dev);

 		if (to_entry(ptr + 1) < ptrs.end) {
-			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
-						GFP_NOIO, &ca->replica_set));
+			n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
+						   &ca->replica_set));

 			n->bio.bi_end_io	= wbio->bio.bi_end_io;
 			n->bio.bi_private	= wbio->bio.bi_private;
@@ -975,10 +975,9 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
 		       ? ((unsigned long) buf & (PAGE_SIZE - 1))
 		       : 0), PAGE_SIZE);
-	pages = min(pages, BIO_MAX_VECS);
+	pages = min_t(unsigned, pages, BIO_MAX_PAGES);

-	bio = bio_alloc_bioset(NULL, pages, 0,
-			       GFP_NOIO, &c->bio_write);
+	bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
 	wbio			= wbio_init(bio);
 	wbio->put_bio		= true;
 	/* copy WRITE_SYNC flag */
@@ -1902,9 +1901,6 @@ void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
 	prt_bitflags(out, bch2_write_flags, op->flags);
 	prt_newline(out);

-	prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
-	prt_newline(out);
-
 	printbuf_indent_sub(out, 2);
 }

@@ -2033,7 +2029,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
 		goto err;

 	rbio_init(&(*rbio)->bio, opts);
-	bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
+	bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);

 	if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
 				 GFP_NOIO))
@@ -2048,7 +2044,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
 		goto err;

 	bio = &op->write.op.wbio.bio;
-	bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
+	bio_init(bio, bio->bi_inline_vecs, pages);

 	ret = bch2_data_update_init(trans, NULL, &op->write,
 			writepoint_hashed((unsigned long) current),
@@ -2748,10 +2744,8 @@ get_bio:
 	} else if (bounce) {
 		unsigned sectors = pick.crc.compressed_size;

-		rbio = rbio_init(bio_alloc_bioset(NULL,
+		rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
 						  DIV_ROUND_UP(sectors, PAGE_SECTORS),
-						  0,
-						  GFP_NOIO,
 						  &c->bio_read_split),
 				 orig->opts);

@@ -2767,8 +2761,8 @@ get_bio:
 		 * from the whole bio, in which case we don't want to retry and
 		 * lose the error)
 		 */
-		rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
-						 &c->bio_read_split),
+		rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
+						&c->bio_read_split),
 				 orig->opts);
 		rbio->bio.bi_iter = iter;
 		rbio->split	= true;
@@ -3013,7 +3007,7 @@ err:
 	if (ret) {
 		bch_err_inum_offset_ratelimited(c, inum.inum,
-						bvec_iter.bi_sector << 9,
+						(u64) bvec_iter.bi_sector << 9,
 						"read error %i from btree lookup", ret);
 		rbio->bio.bi_status = BLK_STS_IOERR;
 		bch2_rbio_done(rbio);
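Note: the io.c hunks above collect most of the bio API deltas in this backport: allocation takes gfp first, there is no bdev/opf argument at alloc time, bio_clone_fast() stands in for bio_alloc_clone(), and BIO_MAX_PAGES is the vector cap. A sketch of the 4.19 idiom as a single helper, assuming a 4.19 kernel (alloc_read_bio() is a hypothetical name):

	#include <linux/bio.h>

	static struct bio *alloc_read_bio(struct block_device *bdev, sector_t sector,
					  unsigned nr_pages, struct bio_set *bs)
	{
		struct bio *bio = bio_alloc_bioset(GFP_NOIO,
					min_t(unsigned, nr_pages, BIO_MAX_PAGES), bs);

		if (!bio)
			return NULL;

		bio_set_dev(bio, bdev);			/* device set after alloc */
		bio->bi_iter.bi_sector = sector;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);	/* op + flags set after alloc */
		return bio;
	}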
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 3f0e6d71aa32..fb9cc4a8e971 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1180,11 +1180,11 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)

 	nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

-	ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
+	ca->journal.bio = bio_kmalloc(GFP_KERNEL, nr_bvecs);
 	if (!ca->journal.bio)
 		return -BCH_ERR_ENOMEM_dev_journal_init;

-	bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);
+	bio_init(ca->journal.bio, ca->journal.bio->bi_inline_vecs, nr_bvecs);

 	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
 	if (!ja->buckets)
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index 024cea9f5902..dff4cb7c20a4 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -279,7 +279,7 @@ static inline void bch2_journal_res_put(struct journal *j,
 	if (!res->ref)
 		return;

-	lock_release(&j->res_map, _THIS_IP_);
+	lock_release(&j->res_map, 0, _THIS_IP_);

 	while (res->u64s)
 		bch2_journal_add_entry(j, res,
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 45b1b839783d..24a80a039f9e 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -874,10 +874,10 @@ reread:
 			end - offset, buf->size >> 9);
 		nr_bvecs = buf_pages(buf->data, sectors_read << 9);

-		bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
-		bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
-
+		bio = bio_kmalloc(GFP_KERNEL, nr_bvecs);
+		bio_set_dev(bio, ca->disk_sb.bdev);
 		bio->bi_iter.bi_sector = offset;
+		bio_set_op_attrs(bio, REQ_OP_READ, 0);
 		bch2_bio_map(bio, buf->data, sectors_read << 9);

 		ret = submit_bio_wait(bio);
@@ -1595,10 +1595,12 @@ static void do_journal_write(struct closure *cl)
 				 sectors);

 		bio = ca->journal.bio;
-		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
+		bio_reset(bio);
+		bio_set_dev(bio, ca->disk_sb.bdev);
 		bio->bi_iter.bi_sector	= ptr->offset;
 		bio->bi_end_io		= journal_write_endio;
 		bio->bi_private		= ca;
+		bio->bi_opf		= REQ_OP_WRITE|REQ_SYNC|REQ_META;

 		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
 		ca->prev_journal_sector = bio->bi_iter.bi_sector;
@@ -1844,7 +1846,9 @@ retry_alloc:
 			percpu_ref_get(&ca->io_ref);

 			bio = ca->journal.bio;
-			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
+			bio_reset(bio);
+			bio_set_dev(bio, ca->disk_sb.bdev);
+			bio->bi_opf		= REQ_OP_FLUSH;
 			bio->bi_end_io		= journal_write_endio;
 			bio->bi_private		= ca;
 			closure_bio_submit(bio, cl);
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 37c6846a30aa..9455cae3dee1 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -13,6 +13,7 @@
 #include <linux/kthread.h>
 #include <linux/sched/mm.h>
+#include <linux/sched/task.h>
 #include <trace/events/bcachefs.h>

 /* Free space calculations: */
@@ -267,11 +268,11 @@ void bch2_journal_do_discards(struct journal *j)
 		while (should_discard_bucket(j, ja)) {
 			if (!c->opts.nochanges &&
 			    ca->mi.discard &&
-			    bdev_max_discard_sectors(ca->disk_sb.bdev))
+			    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
 				blkdev_issue_discard(ca->disk_sb.bdev,
 					bucket_to_sector(ca,
 						ja->buckets[ja->discard_idx]),
-					ca->mi.bucket_size, GFP_NOIO);
+					ca->mi.bucket_size, GFP_NOIO, 0);

 			spin_lock(&j->lock);
 			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
@@ -709,7 +710,7 @@ static int bch2_journal_reclaim_thread(void *arg)
 	j->next_reclaim = now + delay;

 	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
+		set_current_state(TASK_INTERRUPTIBLE);
 		if (kthread_should_stop())
 			break;
 		if (j->reclaim_kicked)
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index d7bcdc88657a..2a4641af4d7a 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -300,7 +300,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	io->read_sectors	= k.k->size;
 	io->write_sectors	= k.k->size;

-	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
+	bio_init(&io->write.op.wbio.bio, io->bi_inline_vecs, pages);
 	bio_set_prio(&io->write.op.wbio.bio,
 		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

@@ -310,7 +310,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	io->rbio.c		= c;
 	io->rbio.opts		= io_opts;
-	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
+	bio_init(&io->rbio.bio, io->bi_inline_vecs, pages);
 	io->rbio.bio.bi_vcnt = pages;
 	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 	io->rbio.bio.bi_iter.bi_size = sectors << 9;
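Note: on 4.19 discard capability is probed via the request queue and blkdev_issue_discard() still takes a trailing flags argument, as in the journal_reclaim hunk above. A sketch, assuming a 4.19 kernel (discard_range() is a hypothetical helper):

	#include <linux/blkdev.h>

	static int discard_range(struct block_device *bdev, sector_t sector,
				 sector_t nr_sects)
	{
		if (!blk_queue_discard(bdev_get_queue(bdev)))
			return -EOPNOTSUPP;	/* device has no discard support */

		return blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOIO, 0);
	}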
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index d8426e754cdf..e46b13145bf3 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -234,12 +234,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
 	r_p = bkey_i_to_reflink_p(orig);
 	set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));

-	/* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */
-#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
-	__underlying_memset(&r_p->v, 0, sizeof(r_p->v));
-#else
 	memset(&r_p->v, 0, sizeof(r_p->v));
-#endif

 	r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 6178ae620ff1..fa411a3a10d5 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -13,7 +13,7 @@
 #include <linux/crc32c.h>
 #include <crypto/hash.h>
-#include <crypto/sha2.h>
+#include <crypto/sha.h>

 static inline enum bch_str_hash_type
 bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 519df09917e3..fdc22355cdf4 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -141,13 +141,12 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
 	if (sb->have_bio) {
 		unsigned nr_bvecs = DIV_ROUND_UP(new_buffer_size, PAGE_SIZE);

-		bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
+		bio = bio_kmalloc(GFP_KERNEL, nr_bvecs);
 		if (!bio)
 			return -BCH_ERR_ENOMEM_sb_bio_realloc;

-		bio_init(bio, NULL, bio->bi_inline_vecs, nr_bvecs, 0);
-
-		kfree(sb->bio);
+		if (sb->bio)
+			bio_put(sb->bio);
 		sb->bio = bio;
 	}

@@ -517,8 +516,10 @@ static int read_one_super(struct bch_sb_handle *sb, u64 offset, struct printbuf
 	size_t bytes;
 	int ret;
 reread:
-	bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
+	bio_reset(sb->bio);
+	bio_set_dev(sb->bio, sb->bdev);
 	sb->bio->bi_iter.bi_sector = offset;
+	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
 	bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);

 	ret = submit_bio_wait(sb->bio);
@@ -648,8 +649,10 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
 	 * Error reading primary superblock - read location of backup
 	 * superblocks:
 	 */
-	bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
+	bio_reset(sb->bio);
+	bio_set_dev(sb->bio, sb->bdev);
 	sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
+	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
 	/*
 	 * use sb buffer to read layout, since sb buffer is page aligned but
 	 * layout won't be:
 	 */
@@ -733,10 +736,12 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
 	struct bch_sb *sb = ca->disk_sb.sb;
 	struct bio *bio = ca->disk_sb.bio;

-	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
+	bio_reset(bio);
+	bio_set_dev(bio, ca->disk_sb.bdev);
 	bio->bi_iter.bi_sector	= le64_to_cpu(sb->layout.sb_offset[0]);
 	bio->bi_end_io		= write_super_endio;
 	bio->bi_private		= ca;
+	bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC|REQ_META);
 	bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);

 	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
@@ -757,10 +762,12 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
 	sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb), null_nonce(), sb);

-	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
+	bio_reset(bio);
+	bio_set_dev(bio, ca->disk_sb.bdev);
 	bio->bi_iter.bi_sector	= le64_to_cpu(sb->offset);
 	bio->bi_end_io		= write_super_endio;
 	bio->bi_private		= ca;
+	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
 	bch2_bio_map(bio, sb,
 		     roundup((size_t) vstruct_bytes(sb),
 			     bdev_logical_block_size(ca->disk_sb.bdev)));

@@ -1250,8 +1257,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 		u->entry.type	= BCH_JSET_ENTRY_data_usage;
 		u->v		= cpu_to_le64(c->usage_base->replicas[i]);
-		unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
-			      "embedded variable length struct");
+		memcpy(&u->r, e, replicas_entry_bytes(e));
 	}

 	for_each_member_device(ca, c, dev) {
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 613d09f5b8e6..28fb6eba1694 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -66,19 +66,10 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

 #define KTYPE(type)						\
-static const struct attribute_group type ## _group = {		\
-	.attrs = type ## _files					\
-};								\
-								\
-static const struct attribute_group *type ## _groups[] = {	\
-	&type ## _group,					\
-	NULL							\
-};								\
-								\
-static const struct kobj_type type ## _ktype = {		\
+struct kobj_type type ## _ktype = {				\
 	.release	= type ## _release,			\
 	.sysfs_ops	= &type ## _sysfs_ops,			\
-	.default_groups = type ## _groups			\
+	.default_attrs	= type ## _files			\
 }

 static void bch2_fs_release(struct kobject *);
@@ -183,6 +174,44 @@ static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
 				&c->dev_usage_journal_res, u64s * nr);
 }

+int bch2_congested(void *data, int bdi_bits)
+{
+	struct bch_fs *c = data;
+	struct backing_dev_info *bdi;
+	struct bch_dev *ca;
+	unsigned i;
+	int ret = 0;
+
+	rcu_read_lock();
+	if (bdi_bits & (1 << WB_sync_congested)) {
+		/* Reads - check all devices: */
+		for_each_readable_member(ca, c, i) {
+			bdi = ca->disk_sb.bdev->bd_bdi;
+
+			if (bdi_congested(bdi, bdi_bits)) {
+				ret = 1;
+				break;
+			}
+		}
+	} else {
+		const struct bch_devs_mask *devs =
+			bch2_target_to_mask(c, c->opts.foreground_target) ?:
+			&c->rw_devs[BCH_DATA_user];
+
+		for_each_member_device_rcu(ca, c, i, devs) {
+			bdi = ca->disk_sb.bdev->bd_bdi;
+
+			if (bdi_congested(bdi, bdi_bits)) {
+				ret = 1;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
 /* Filesystem RO/RW: */

 /*
@@ -255,6 +284,8 @@ static void bch2_writes_disabled(struct percpu_ref *writes)

 void bch2_fs_read_only(struct bch_fs *c)
 {
+	unsigned i;
+
 	if (!test_bit(BCH_FS_RW, &c->flags)) {
 		bch2_journal_reclaim_stop(&c->journal);
 		return;
@@ -270,7 +301,7 @@ void bch2_fs_read_only(struct bch_fs *c)
 #ifndef BCH_WRITE_REF_DEBUG
 	percpu_ref_kill(&c->writes);
 #else
-	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
+	for (i = 0; i < BCH_WRITE_REF_NR; i++)
 		bch2_write_ref_put(c, i);
 #endif

@@ -413,7 +444,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 #ifndef BCH_WRITE_REF_DEBUG
 	percpu_ref_reinit(&c->writes);
 #else
-	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
+	for (i = 0; i < BCH_WRITE_REF_NR; i++) {
 		BUG_ON(atomic_long_read(&c->writes[i]));
 		atomic_long_inc(&c->writes[i]);
 	}
@@ -535,7 +566,8 @@ void __bch2_fs_stop(struct bch_fs *c)
 	for_each_member_device(ca, c, i)
 		if (ca->kobj.state_in_sysfs &&
 		    ca->disk_sb.bdev)
-			sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
+			sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
+					  "bcachefs");

 	if (c->kobj.state_in_sysfs)
 		kobject_del(&c->kobj);
@@ -1020,7 +1052,8 @@ static void bch2_dev_free(struct bch_dev *ca)

 	if (ca->kobj.state_in_sysfs &&
 	    ca->disk_sb.bdev)
-		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
+		sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
+				  "bcachefs");

 	if (ca->kobj.state_in_sysfs)
 		kobject_del(&ca->kobj);
@@ -1056,7 +1089,10 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
 	wait_for_completion(&ca->io_ref_completion);

 	if (ca->kobj.state_in_sysfs) {
-		sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
+		struct kobject *block =
+			&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;
+
+		sysfs_remove_link(block, "bcachefs");
 		sysfs_remove_link(&ca->kobj, "block");
 	}

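Note: bdev_kobj() is a recent helper; on 4.19 the block device's sysfs kobject is reached through its hd_struct, which is the pattern the sysfs-link hunks above repeat. A sketch, assuming a 4.19 kernel (bdev_kobj_compat() is a hypothetical name, not part of the patch):

	#include <linux/genhd.h>

	static struct kobject *bdev_kobj_compat(struct block_device *bdev)
	{
		/* part_to_dev() maps the partition to its embedded struct device */
		return &part_to_dev(bdev->bd_part)->kobj;
	}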
@@ -1093,12 +1129,12 @@ static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
 	}

 	if (ca->disk_sb.bdev) {
-		struct kobject *block = bdev_kobj(ca->disk_sb.bdev);
+		struct kobject *block =
+			&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;

 		ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
 		if (ret)
 			return ret;
-
 		ret = sysfs_create_link(&ca->kobj, block, "block");
 		if (ret)
 			return ret;
@@ -1824,19 +1860,22 @@ err:
 }

 /* return with ref on ca->ref: */
-struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
+struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *path)
 {
+	struct block_device *bdev = lookup_bdev(path);
 	struct bch_dev *ca;
 	unsigned i;

-	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i, NULL)
-		if (!strcmp(name, ca->name))
+	if (IS_ERR(bdev))
+		return ERR_CAST(bdev);
+
+	for_each_member_device(ca, c, i)
+		if (ca->disk_sb.bdev == bdev)
 			goto found;
+
 	ca = ERR_PTR(-ENOENT);
 found:
-	rcu_read_unlock();
-
+	bdput(bdev);
 	return ca;
 }
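Note: on 4.19 lookup_bdev() returns the block_device itself rather than filling in a dev_t, and the reference is dropped with bdput(); both bch2_path_to_fs() and bch2_dev_lookup() above follow that pattern. A sketch, assuming a 4.19 kernel (dev_of_path() is a hypothetical helper):

	#include <linux/fs.h>

	static int dev_of_path(const char *path, dev_t *dev)
	{
		struct block_device *bdev = lookup_bdev(path);	/* 4.19: returns bdev */

		if (IS_ERR(bdev))
			return PTR_ERR(bdev);

		*dev = bdev->bd_dev;
		bdput(bdev);	/* drop the reference lookup_bdev() took */
		return 0;
	}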
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
index d4e939c808fa..820e1352aac4 100644
--- a/fs/bcachefs/super.h
+++ b/fs/bcachefs/super.h
@@ -224,6 +224,7 @@ static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)

 struct bch_fs *bch2_dev_to_fs(dev_t);
 struct bch_fs *bch2_uuid_to_fs(uuid_le);
+int bch2_congested(void *, int);

 bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
 			    enum bch_member_state, int);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 0f86a6c0c9d8..a339ee27ceaa 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -211,9 +211,11 @@ const char * const bch2_write_refs[] = {

 static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
 {
+	unsigned i;
+
 	bch2_printbuf_tabstop_push(out, 24);

-	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
+	for (i = 0; i < ARRAY_SIZE(c->writes); i++) {
 		prt_str(out, bch2_write_refs[i]);
 		prt_tab(out);
 		prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
@@ -291,7 +293,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
 		 incompressible_sectors = 0,
 		 compressed_sectors_compressed = 0,
 		 compressed_sectors_uncompressed = 0;
-	int ret;
+	int ret = 0;

 	if (!test_bit(BCH_FS_STARTED, &c->flags))
 		return -EPERM;
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index c50473d4925d..d2115e81e99d 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -268,26 +268,7 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)

 int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task)
 {
-	unsigned nr_entries = 0;
-	int ret = 0;
-
-	stack->nr = 0;
-	ret = darray_make_room(stack, 32);
-	if (ret)
-		return ret;
-
-	if (!down_read_trylock(&task->signal->exec_update_lock))
-		return -1;
-
-	do {
-		nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, 0);
-	} while (nr_entries == stack->size &&
-		 !(ret = darray_make_room(stack, stack->size * 2)));
-
-	stack->nr = nr_entries;
-	up_read(&task->signal->exec_update_lock);
-
-	return ret;
+	return 0;
 }

 void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
@@ -764,7 +745,7 @@ void bch2_bio_map(struct bio *bio, void *base, size_t size)
 int _bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask)
 {
 	while (size) {
-		struct page *page = _alloc_pages(gfp_mask, 0);
+		struct page *page = alloc_pages(gfp_mask, 0);
 		unsigned len = min_t(size_t, PAGE_SIZE, size);

 		if (!page)
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index ecfe54012e3d..aae71ebfae5b 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -21,6 +21,13 @@

 #include "darray.h"

+#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - 9)
+#define PAGE_SECTORS		(1U << PAGE_SECTORS_SHIFT)
+
+#define fallthrough		__attribute__((__fallthrough__))
+
+#define unsafe_memcpy(_dst, _src, _n, _reason)	memcpy(_dst, _src, _n)
+
 struct closure;

 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -44,6 +51,10 @@ struct closure;
 	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
 	 __builtin_types_compatible_p(typeof(_val), const _type))

+#ifndef alloc_hooks
+#define alloc_hooks(_fn, _ret, _fail)	_fn
+#endif
+
 /* Userspace doesn't align allocations as nicely as the kernel allocators: */
 static inline size_t buf_pages(void *p, size_t len)
 {
@@ -62,9 +73,9 @@ static inline void vpfree(void *p, size_t size)

 static inline void *_vpmalloc(size_t size, gfp_t gfp_mask)
 {
-	return (void *) _get_free_pages(gfp_mask|__GFP_NOWARN,
+	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
 					get_order(size)) ?:
-		__vmalloc(size, gfp_mask);
+		__vmalloc(size, gfp_mask, PAGE_KERNEL);
 }
 #define vpmalloc(_size, _gfp)						\
 	alloc_hooks(_vpmalloc(_size, _gfp), void *, NULL)
@@ -80,7 +91,7 @@ static inline void kvpfree(void *p, size_t size)
 static inline void *_kvpmalloc(size_t size, gfp_t gfp_mask)
 {
 	return size < PAGE_SIZE
-		? _kmalloc(size, gfp_mask)
+		? kmalloc(size, gfp_mask)
 		: _vpmalloc(size, gfp_mask);
 }
 #define kvpmalloc(_size, _gfp)						\
@@ -722,6 +733,35 @@ static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
 	memset(s + bytes, c, rem);
 }

+static inline struct bio_vec next_contig_bvec(struct bio *bio,
+					      struct bvec_iter *iter)
+{
+	struct bio_vec bv = bio_iter_iovec(bio, *iter);
+
+	bio_advance_iter(bio, iter, bv.bv_len);
+#ifndef CONFIG_HIGHMEM
+	while (iter->bi_size) {
+		struct bio_vec next = bio_iter_iovec(bio, *iter);
+
+		if (page_address(bv.bv_page) + bv.bv_offset + bv.bv_len !=
+		    page_address(next.bv_page) + next.bv_offset)
+			break;
+
+		bv.bv_len += next.bv_len;
+		bio_advance_iter(bio, iter, next.bv_len);
+	}
+#endif
+	return bv;
+}
+
+#define __bio_for_each_contig_segment(bv, bio, iter, start)		\
+	for (iter = (start);						\
+	     (iter).bi_size &&						\
+		((bv = next_contig_bvec((bio), &(iter))), 1);)
+
+#define bio_for_each_contig_segment(bv, bio, iter)			\
+	__bio_for_each_contig_segment(bv, bio, iter, (bio)->bi_iter)
+
 void sort_cmp_size(void *base, size_t num, size_t size,
 		   int (*cmp_func)(const void *, const void *, size_t),
 		   void (*swap_func)(void *, void *, size_t));
diff --git a/fs/bcachefs/varint.c b/fs/bcachefs/varint.c
index 5143b603bf67..a2d6bb7136c7 100644
--- a/fs/bcachefs/varint.c
+++ b/fs/bcachefs/varint.c
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0

 #include <linux/bitops.h>
-#include <linux/math.h>
 #include <linux/string.h>
 #include <asm/unaligned.h>
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index 9f77bb2ecf5f..f44df3d60527 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -378,7 +378,6 @@ static int bch2_xattr_get_handler(const struct xattr_handler *handler,
 }

 static int bch2_xattr_set_handler(const struct xattr_handler *handler,
-				  struct user_namespace *mnt_userns,
 				  struct dentry *dentry, struct inode *vinode,
 				  const char *name, const void *value,
 				  size_t size, int flags)
@@ -517,7 +516,6 @@ static int inode_opt_set_fn(struct bch_inode_info *inode,
 }

 static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler,
-				   struct user_namespace *mnt_userns,
 				   struct dentry *dentry, struct inode *vinode,
 				   const char *name, const void *value,
 				   size_t size, int flags)
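Note: util.h above now doubles as a compat layer: PAGE_SECTORS, fallthrough and unsafe_memcpy() are defined for 4.19, and alloc_hooks() collapses to a no-op behind an #ifndef guard. fallthrough and unsafe_memcpy are defined unconditionally, though; the #ifndef-guarded spelling below is a sketch of the more defensive variant, should this header ever build against a kernel that already provides them:

	#ifndef fallthrough
	#define fallthrough	__attribute__((__fallthrough__))
	#endif

	#ifndef unsafe_memcpy
	#define unsafe_memcpy(_dst, _src, _n, _reason)	memcpy(_dst, _src, _n)
	#endif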
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index c74b7376990d..5aa1986128df 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -40,7 +40,6 @@
 #include <linux/bug.h>
 #include <linux/limits.h>
 #include <linux/log2.h>
-#include <linux/math.h>
 #include <linux/types.h>

 struct genradix_root;
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 8f0f16061285..039bbe85d66c 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -104,10 +104,10 @@ DECLARE_EVENT_CLASS(bio,
 	),

 	TP_fast_assign(
-		__entry->dev		= bio->bi_bdev ? bio_dev(bio) : 0;
+		__entry->dev		= bio->bi_disk ? bio_dev(bio) : 0;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
 	),

 	TP_printk("%d,%d %s %llu + %u",
diff --git a/kernel/locking/six.c b/kernel/locking/six.c
index 3d366a843eb5..537a8e54c479 100644
--- a/kernel/locking/six.c
+++ b/kernel/locking/six.c
@@ -13,6 +13,18 @@

 #include <trace/events/lock.h>

+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+	/*
+	 * As lock holder preemption issue, we both skip spinning if
+	 * task is not on cpu or its cpu is preempted
+	 */
+	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
+}
+
+#define trace_contention_begin(_lock, _n)
+#define trace_contention_end(_lock, _n)
+
 #ifdef DEBUG
 #define EBUG_ON(cond)		BUG_ON(cond)
 #else
@@ -20,7 +32,7 @@
 #endif

 #define six_acquire(l, t, r, ip)	lock_acquire(l, 0, t, r, 1, NULL, ip)
-#define six_release(l, ip)		lock_release(l, ip)
+#define six_release(l, ip)		lock_release(l, 0, ip)

 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
diff --git a/lib/mean_and_variance.c b/lib/mean_and_variance.c
index bd08da5f9e70..724bb245a3fb 100644
--- a/lib/mean_and_variance.c
+++ b/lib/mean_and_variance.c
@@ -38,7 +38,6 @@
 #include <linux/compiler.h>
 #include <linux/export.h>
 #include <linux/limits.h>
-#include <linux/math.h>
 #include <linux/math64.h>
 #include <linux/mean_and_variance.h>
 #include <linux/module.h>