author    Kent Overstreet <kent.overstreet@gmail.com>    2021-07-13 16:03:51 -0400
committer Kent Overstreet <kent.overstreet@gmail.com>    2022-03-12 20:13:02 -0500
commit    4108d6bed957295213e31c76d2b29017ff5b0f8e (patch)
tree      90587b887227c497d0945400068d32bcccecc545
parent    4a410ca88242e13e6bcfdafdcd1836e2cca9ad63 (diff)
bcachefs: Add safe versions of varint encode/decode
This adds safe versions of bch2_varint_(encode|decode) that don't read or write past the end of the buffer, or past the varint being encoded.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--    fs/bcachefs/alloc_background.c    4
-rw-r--r--    fs/bcachefs/inode.c               6
-rw-r--r--    fs/bcachefs/varint.c              73
-rw-r--r--    fs/bcachefs/varint.h              3
4 files changed, 80 insertions, 6 deletions
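The practical difference between the two pairs is who guarantees bounds: the safe versions never touch memory outside the varint itself (or outside [in, end) when decoding), while the _fast versions may read or write up to 8 bytes beyond it and rely on the caller to provide that padding. A minimal caller-side sketch of the safe pair follows; the function name, buffer size, and locals are hypothetical, not from the patch:

	/*
	 * Illustrative sketch only (not part of the patch): round-trip a value
	 * through the bounds-checked encode/decode.
	 */
	#include <linux/types.h>
	#include "varint.h"

	static int varint_roundtrip_example(u64 v)
	{
		u8 buf[16];	/* a varint is at most 9 bytes */
		u64 decoded;
		int enc_len, dec_len;

		/* safe encode: writes only the bytes actually needed */
		enc_len = bch2_varint_encode(buf, v);

		/* safe decode: returns -1 rather than read past buf + enc_len */
		dec_len = bch2_varint_decode(buf, buf + enc_len, &decoded);
		if (dec_len < 0)
			return -1;

		/*
		 * The _fast variants trade these checks for speed: they may
		 * touch up to 8 bytes past the varint, so callers must
		 * guarantee that much slack in the buffer.
		 */
		return decoded == v ? 0 : -1;
	}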
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 083e51465bee..82e6ee8117b5 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -130,7 +130,7 @@ static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
#define x(_name, _bits) \
if (fieldnr < a.v->nr_fields) { \
- ret = bch2_varint_decode(in, end, &v); \
+ ret = bch2_varint_decode_fast(in, end, &v); \
if (ret < 0) \
return ret; \
in += ret; \
@@ -166,7 +166,7 @@ static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
nr_fields++; \
\
if (src._name) { \
- out += bch2_varint_encode(out, src._name); \
+ out += bch2_varint_encode_fast(out, src._name); \
\
last_nonzero_field = out; \
last_nonzero_fieldnr = nr_fields; \
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 59edb4cea5f1..46f32f978dc9 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -137,7 +137,7 @@ static void bch2_inode_pack_v2(struct bkey_inode_buf *packed,
nr_fields++; \
\
if (inode->_name) { \
- ret = bch2_varint_encode(out, inode->_name); \
+ ret = bch2_varint_encode_fast(out, inode->_name); \
out += ret; \
\
if (_bits > 64) \
@@ -246,13 +246,13 @@ static int bch2_inode_unpack_v2(struct bkey_s_c_inode inode,
#define x(_name, _bits) \
if (fieldnr < INODE_NR_FIELDS(inode.v)) { \
- ret = bch2_varint_decode(in, end, &v[0]); \
+ ret = bch2_varint_decode_fast(in, end, &v[0]); \
if (ret < 0) \
return ret; \
in += ret; \
\
if (_bits > 64) { \
- ret = bch2_varint_decode(in, end, &v[1]); \
+ ret = bch2_varint_decode_fast(in, end, &v[1]); \
if (ret < 0) \
return ret; \
in += ret; \
diff --git a/fs/bcachefs/varint.c b/fs/bcachefs/varint.c
index a3d252c741c8..e6a041541792 100644
--- a/fs/bcachefs/varint.c
+++ b/fs/bcachefs/varint.c
@@ -1,10 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
#include "varint.h"
+/**
+ * bch2_varint_encode - encode a variable length integer
+ * @out - destination to encode to
+ * @v - unsigned integer to encode
+ *
+ * Returns the size in bytes of the encoded integer - at most 9 bytes
+ */
int bch2_varint_encode(u8 *out, u64 v)
{
unsigned bits = fls64(v|1);
@@ -13,17 +21,80 @@ int bch2_varint_encode(u8 *out, u64 v)
if (likely(bytes < 9)) {
v <<= bytes;
v |= ~(~0 << (bytes - 1));
+ v = cpu_to_le64(v);
+ memcpy(out, &v, bytes);
} else {
*out++ = 255;
bytes = 9;
+ put_unaligned_le64(v, out);
}
- put_unaligned_le64(v, out);
return bytes;
}
+/**
+ * bch2_varint_decode - decode a variable length integer
+ * @in - varint to decode
+ * @end - end of buffer to decode from
+ * @out - on success, decoded integer
+ *
+ * Returns the size in bytes of the decoded integer - or -1 on failure (would
+ * have read past the end of the buffer)
+ */
int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
{
+ unsigned bytes = likely(in < end)
+ ? ffz(*in & 255) + 1
+ : 1;
+ u64 v;
+
+ if (unlikely(in + bytes > end))
+ return -1;
+
+ if (likely(bytes < 9)) {
+ v = 0;
+ memcpy(&v, in, bytes);
+ v = le64_to_cpu(v);
+ v >>= bytes;
+ } else {
+ v = get_unaligned_le64(++in);
+ }
+
+ *out = v;
+ return bytes;
+}
+
+/**
+ * bch2_varint_encode_fast - fast version of bch2_varint_encode
+ *
+ * This version assumes it's always safe to write 8 bytes to @out, even if the
+ * encoded integer would be smaller.
+ */
+int bch2_varint_encode_fast(u8 *out, u64 v)
+{
+ unsigned bits = fls64(v|1);
+ unsigned bytes = DIV_ROUND_UP(bits, 7);
+
+ if (likely(bytes < 9)) {
+ v <<= bytes;
+ v |= ~(~0 << (bytes - 1));
+ } else {
+ *out++ = 255;
+ bytes = 9;
+ }
+
+ put_unaligned_le64(v, out);
+ return bytes;
+}
+
+/**
+ * bch2_varint_decode_fast - fast version of bch2_varint_decode
+ *
+ * This version assumes that it is safe to read at most 8 bytes past @end
+ * (we still return an error if the varint extends past @end).
+ */
+int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out)
+{
u64 v = get_unaligned_le64(in);
unsigned bytes = ffz(v & 255) + 1;
diff --git a/fs/bcachefs/varint.h b/fs/bcachefs/varint.h
index 8daf813576b7..92a182fb3d7a 100644
--- a/fs/bcachefs/varint.h
+++ b/fs/bcachefs/varint.h
@@ -5,4 +5,7 @@
int bch2_varint_encode(u8 *, u64);
int bch2_varint_decode(const u8 *, const u8 *, u64 *);
+int bch2_varint_encode_fast(u8 *, u64);
+int bch2_varint_decode_fast(const u8 *, const u8 *, u64 *);
+
#endif /* _BCACHEFS_VARINT_H */
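For reference, a worked example of the format implemented in varint.c above (the value is illustrative, not taken from the patch): encoding v = 300 needs fls64(300|1) = 9 bits, so bytes = DIV_ROUND_UP(9, 7) = 2; the value stored is (300 << 2) | 0b1 = 0x4b1, written little-endian as the bytes 0xb1 0x04. On decode, the first byte 0xb1 = 0b10110001 has its lowest clear bit at position 1, so ffz() + 1 = 2 bytes are consumed, and 0x4b1 >> 2 recovers 300. The safe decoder refuses to read those bytes if they would cross @end; the fast decoder speculatively loads 8 bytes and relies on the caller to provide that padding.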