diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2016-08-06 03:09:43 -0800 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2016-09-25 06:00:45 -0800 |
commit | ba3da3036d617b7174e4fd81a7482cdfe448a0de (patch) | |
tree | 830bb68e690839af17cc51ba65c96192e44bfbaf | |
parent | 2df6ee3235b74f807f683602ae254ebb2865df8e (diff) |
transactional fcollapsebcache-transactions
-rw-r--r-- | drivers/md/bcache/fs-io.c | 138 |
1 files changed, 80 insertions, 58 deletions
diff --git a/drivers/md/bcache/fs-io.c b/drivers/md/bcache/fs-io.c index b2594a27c00c..1cd4b5f29fe6 100644 --- a/drivers/md/bcache/fs-io.c +++ b/drivers/md/bcache/fs-io.c @@ -11,6 +11,7 @@ #include "journal.h" #include "io.h" #include "keylist.h" +#include "trans.h" #include <linux/aio.h> #include <linux/backing-dev.h> @@ -1998,73 +1999,47 @@ out: return ret; } -static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) +int bch_fcollapse_transactional(struct cache_set *c, + struct bch_transaction *trans) { - struct address_space *mapping = inode->i_mapping; - struct bch_inode_info *ei = to_bch_ei(inode); - struct cache_set *c = inode->i_sb->s_fs_info; - struct btree_iter src; - struct btree_iter dst; +} + +static long bch_fcollapse_btree(struct cache_set *c, + struct bch_inode_info *ei, + struct bpos pos, + struct bpos end, + u64 shift) +{ + struct btree_iter dst, src; BKEY_PADDED(k) copy; struct bkey_s_c k; struct i_sectors_hook i_sectors_hook; - loff_t new_size; int ret; - if ((offset | len) & (PAGE_SIZE - 1)) - return -EINVAL; - - bch_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS, - POS(inode->i_ino, offset >> 9)); + bch_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS, pos); /* position will be set from dst iter's position: */ bch_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN); bch_btree_iter_link(&src, &dst); - /* - * We need i_mutex to keep the page cache consistent with the extents - * btree, and the btree consistent with i_size - we don't need outside - * locking for the extents btree itself, because we're using linked - * iterators - */ - inode_lock(inode); - inode_dio_wait(inode); - pagecache_block_get(&mapping->add_lock); - - ret = -EINVAL; - if (offset + len >= inode->i_size) - goto err; - - if (inode->i_size < len) - goto err; - - new_size = inode->i_size - len; - - ret = write_invalidate_inode_pages_range(inode->i_mapping, - offset, LLONG_MAX); - if (ret) - goto err; - ret = i_sectors_dirty_get(ei, &i_sectors_hook); if (ret) - 
goto err; + return ret; - while (bkey_cmp(dst.pos, - POS(inode->i_ino, - round_up(new_size, PAGE_SIZE) >> 9)) < 0) { + while (bkey_cmp(dst.pos, end) < 0) { struct disk_reservation disk_res; bch_btree_iter_set_pos(&src, - POS(dst.pos.inode, dst.pos.offset + (len >> 9))); + POS(dst.pos.inode, dst.pos.offset + shift)); /* Have to take intent locks before read locks: */ ret = bch_btree_iter_traverse(&dst); if (ret) - goto err_unwind; + goto err; k = bch_btree_iter_peek_with_holes(&src); if (!k.k) { ret = -EIO; - goto err_unwind; + goto err; } bkey_reassemble(&copy.k, k); @@ -2073,7 +2048,7 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) copy.k.k.type = KEY_TYPE_DISCARD; bch_cut_front(src.pos, &copy.k); - copy.k.k.p.offset -= len >> 9; + copy.k.k.p.offset -= shift; BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k))); @@ -2089,7 +2064,7 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) bch_disk_reservation_put(c, &disk_res); if (ret < 0 && ret != -EINTR) - goto err_unwind; + goto err; bch_btree_iter_unlock(&src); } @@ -2097,33 +2072,80 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) bch_btree_iter_unlock(&src); bch_btree_iter_unlock(&dst); - ret = bch_inode_truncate(c, inode->i_ino, - round_up(new_size, PAGE_SIZE) >> 9, + ret = bch_inode_truncate(c, end.inode, end.offset, &i_sectors_hook.hook, &ei->journal_seq); if (ret) - goto err_unwind; + goto err; i_sectors_dirty_put(ei, &i_sectors_hook); mutex_lock(&ei->update_lock); - i_size_write(inode, new_size); - ret = bch_write_inode_size(c, ei, inode->i_size); + i_size_write(&ei->vfs_inode, ei->vfs_inode.i_size - (shift << 9)); + ret = bch_write_inode_size(c, ei, ei->vfs_inode.i_size); mutex_unlock(&ei->update_lock); + return 0; +err: + bch_btree_iter_unlock(&src); + bch_btree_iter_unlock(&dst); + i_sectors_dirty_put(ei, &i_sectors_hook); + return ret; +} + +int bch_fcollapse_replay(struct cache_set *c, + struct bch_transaction *trans) +{ +} + +static 
long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len) +{ + struct address_space *mapping = inode->i_mapping; + struct bch_inode_info *ei = to_bch_ei(inode); + struct cache_set *c = inode->i_sb->s_fs_info; + loff_t new_size; + int ret; + + if ((offset | len) & (PAGE_SIZE - 1)) + return -EINVAL; + + /* + * We need i_mutex to keep the page cache consistent with the extents + * btree, and the btree consistent with i_size - we don't need outside + * locking for the extents btree itself, because we're using linked + * iterators + */ + inode_lock(inode); + inode_dio_wait(inode); + pagecache_block_get(&mapping->add_lock); + + ret = -EINVAL; + if (offset + len >= inode->i_size) + goto err; + + if (inode->i_size < len) + goto err; + + new_size = inode->i_size - len; + + ret = write_invalidate_inode_pages_range(inode->i_mapping, + offset, LLONG_MAX); + if (ret) + goto err; + + ret = bch_fcollapse_btree(c, ei, + POS(inode->i_ino, offset >> 9), + POS(inode->i_ino, + round_up(new_size, PAGE_SIZE) >> 9), + len >> 9); + if (ret) + goto err; + pagecache_block_put(&mapping->add_lock); inode_unlock(inode); return ret; -err_unwind: - /* - * XXX: we've left data with multiple pointers... which isn't a _super_ - * serious problem... - */ - i_sectors_dirty_put(ei, &i_sectors_hook); err: - bch_btree_iter_unlock(&src); - bch_btree_iter_unlock(&dst); pagecache_block_put(&mapping->add_lock); inode_unlock(inode); return ret; |