summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2020-10-30 16:57:02 -0400
committerKent Overstreet <kent.overstreet@gmail.com>2020-10-30 16:57:02 -0400
commit08b17fe6c75112bd09bf716cabc270e9b314ec3d (patch)
tree01443df4433137f8341702ccb7fe6d2d18655d0f
parentecca499edd509cd758656d90db17b78964e77da8 (diff)
bcachefs: disable vmap optimization
suspected to be buggy
-rw-r--r--fs/bcachefs/compress.c11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index b50d2b0d5fd3..e6b65ceef38e 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -61,12 +61,6 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
struct bvec_iter start, int rw)
{
struct bbuf ret;
- struct bio_vec bv;
- struct bvec_iter iter;
- unsigned nr_pages = 0;
- struct page *stack_pages[16];
- struct page **pages = NULL;
- void *data;
BUG_ON(bvec_iter_sectors(start) > c->sb.encoded_extent_max);
@@ -77,7 +71,7 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
bio_iter_offset(bio, start),
.type = BB_NONE, .rw = rw
};
-
+#if 0
/* check if we can map the pages contiguously: */
__bio_for_each_segment(bv, bio, iter, start) {
if (iter.bi_size != start.bi_size &&
@@ -113,6 +107,7 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
.type = BB_VMAP, .rw = rw
};
bounce:
+#endif
ret = __bounce_alloc(c, start.bi_size, rw);
if (rw == READ)
@@ -132,7 +127,7 @@ static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
case BB_NONE:
break;
case BB_VMAP:
- vunmap((void *) ((unsigned long) buf.b & PAGE_MASK));
+ BUG();
break;
case BB_KMALLOC:
kfree(buf.b);