Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bio.h | 11
-rw-r--r--  include/linux/blk-mq.h | 2
-rw-r--r--  include/linux/bug.h | 12
-rw-r--r--  include/linux/ceph/osd_client.h | 6
-rw-r--r--  include/linux/ceph/osdmap.h | 13
-rw-r--r--  include/linux/ceph/rados.h | 2
-rw-r--r--  include/linux/cgroup-defs.h | 57
-rw-r--r--  include/linux/cgroup.h | 2
-rw-r--r--  include/linux/cgroup_rdma.h | 53
-rw-r--r--  include/linux/cgroup_subsys.h | 4
-rw-r--r--  include/linux/cma.h | 3
-rw-r--r--  include/linux/compat.h | 4
-rw-r--r--  include/linux/compiler-gcc.h | 15
-rw-r--r--  include/linux/compiler.h | 35
-rw-r--r--  include/linux/crush/crush.h | 41
-rw-r--r--  include/linux/crush/mapper.h | 16
-rw-r--r--  include/linux/dax.h | 12
-rw-r--r--  include/linux/dcache.h | 4
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/dma-contiguous.h | 4
-rw-r--r--  include/linux/dma-mapping.h | 55
-rw-r--r--  include/linux/fs.h | 5
-rw-r--r--  include/linux/fsl-diu-fb.h | 4
-rw-r--r--  include/linux/gfp.h | 2
-rw-r--r--  include/linux/huge_mm.h | 83
-rw-r--r--  include/linux/i2c.h | 1
-rw-r--r--  include/linux/idr.h | 148
-rw-r--r--  include/linux/iomap.h | 3
-rw-r--r--  include/linux/iopoll.h | 2
-rw-r--r--  include/linux/ipmi.h | 2
-rw-r--r--  include/linux/jump_label.h | 23
-rw-r--r--  include/linux/kasan.h | 4
-rw-r--r--  include/linux/kconfig.h | 2
-rw-r--r--  include/linux/kernel.h | 10
-rw-r--r--  include/linux/kernfs.h | 12
-rw-r--r--  include/linux/kprobes.h | 19
-rw-r--r--  include/linux/lz4.h | 701
-rw-r--r--  include/linux/memblock.h | 1
-rw-r--r--  include/linux/memory.h | 3
-rw-r--r--  include/linux/mfd/tps65910.h | 1
-rw-r--r--  include/linux/mic_bus.h | 2
-rw-r--r--  include/linux/migrate.h | 4
-rw-r--r--  include/linux/mlx4/driver.h | 2
-rw-r--r--  include/linux/mm.h | 82
-rw-r--r--  include/linux/mm_inline.h | 7
-rw-r--r--  include/linux/mm_types.h | 23
-rw-r--r--  include/linux/mmu_notifier.h | 14
-rw-r--r--  include/linux/mmzone.h | 2
-rw-r--r--  include/linux/mtd/qinfo.h | 2
-rw-r--r--  include/linux/netdevice.h | 29
-rw-r--r--  include/linux/nvme-rdma.h | 24
-rw-r--r--  include/linux/nvme.h | 10
-rw-r--r--  include/linux/pfn_t.h | 12
-rw-r--r--  include/linux/pid.h | 4
-rw-r--r--  include/linux/pid_namespace.h | 6
-rw-r--r--  include/linux/platform_data/rtc-m48t86.h | 16
-rw-r--r--  include/linux/platform_data/video-imxfb.h | 4
-rw-r--r--  include/linux/platform_data/x86/clk-pmc-atom.h | 44
-rw-r--r--  include/linux/platform_data/x86/pmc_atom.h | 158
-rw-r--r--  include/linux/radix-tree.h | 179
-rw-r--r--  include/linux/rbtree_augmented.h | 4
-rw-r--r--  include/linux/refcount.h | 278
-rw-r--r--  include/linux/rmap.h | 52
-rw-r--r--  include/linux/rodata_test.h | 23
-rw-r--r--  include/linux/sched.h | 43
-rw-r--r--  include/linux/sed-opal.h | 5
-rw-r--r--  include/linux/sem.h | 2
-rw-r--r--  include/linux/shmem_fs.h | 7
-rw-r--r--  include/linux/spi/flash.h | 2
-rw-r--r--  include/linux/sunrpc/cache.h | 6
-rw-r--r--  include/linux/sunrpc/rpc_rdma.h | 9
-rw-r--r--  include/linux/sunrpc/svc.h | 12
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 13
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 1
-rw-r--r--  include/linux/timer.h | 2
-rw-r--r--  include/linux/userfaultfd_k.h | 41
-rw-r--r--  include/linux/watchdog.h | 7
-rw-r--r--  include/linux/workqueue.h | 4
-rw-r--r--  include/linux/writeback.h | 2
79 files changed, 1738 insertions, 778 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7cf8a6c70a3f..8e521194f6fc 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -183,7 +183,7 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
-static inline unsigned bio_segments(struct bio *bio)
+static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec)
{
unsigned segs = 0;
struct bio_vec bv;
@@ -205,12 +205,17 @@ static inline unsigned bio_segments(struct bio *bio)
break;
}
- bio_for_each_segment(bv, bio, iter)
+ __bio_for_each_segment(bv, bio, iter, *bvec)
segs++;
return segs;
}
+static inline unsigned bio_segments(struct bio *bio)
+{
+ return __bio_segments(bio, &bio->bi_iter);
+}
+
/*
* get a reference to a bio, so it won't disappear. the intended use is
* something like:
@@ -384,6 +389,8 @@ extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t,
+ struct bio_set *, int, int);
extern struct bio_set *fs_bio_set;
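A quick illustration of the split-out helper (the wrapper name below is made up for this sketch, not part of the patch): __bio_segments() counts segments starting from any iterator position, and bio_segments() is now just the bio->bi_iter case.

#include <linux/bio.h>

/* Count the segments that remain beyond a saved iterator position.
 * __bio_for_each_segment() copies the iterator, so @pos is not advanced. */
static unsigned int count_segments_from(struct bio *bio, struct bvec_iter pos)
{
	return __bio_segments(bio, &pos);
}

count_segments_from(bio, bio->bi_iter) is equivalent to the plain bio_segments(bio) wrapper added above.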
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8e4df3d6c8cd..001d30d727c5 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -33,6 +33,7 @@ struct blk_mq_hw_ctx {
struct blk_mq_ctx **ctxs;
unsigned int nr_ctx;
+ wait_queue_t dispatch_wait;
atomic_t wait_index;
struct blk_mq_tags *tags;
@@ -160,6 +161,7 @@ enum {
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
+ BLK_MQ_S_TAG_WAITING = 3,
BLK_MQ_MAX_DEPTH = 10240,
diff --git a/include/linux/bug.h b/include/linux/bug.h
index baff2e8fc8a8..5828489309bb 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -124,18 +124,20 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
/*
* Since detected data corruption should stop operation on the affected
- * structures, this returns false if the corruption condition is found.
+ * structures. Return value must be checked and sanely acted on by caller.
*/
+static inline __must_check bool check_data_corruption(bool v) { return v; }
#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
- do { \
- if (unlikely(condition)) { \
+ check_data_corruption(({ \
+ bool corruption = unlikely(condition); \
+ if (corruption) { \
if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
pr_err(fmt, ##__VA_ARGS__); \
BUG(); \
} else \
WARN(1, fmt, ##__VA_ARGS__); \
- return false; \
} \
- } while (0)
+ corruption; \
+ }))
#endif /* _LINUX_BUG_H */
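Because CHECK_DATA_CORRUPTION() is now a __must_check expression rather than a statement with a hidden "return false", the caller decides how to bail out. A minimal sketch in the style of a list-debug helper (the function itself is illustrative, not part of the patch):

#include <linux/bug.h>
#include <linux/list.h>

/* Returns false (after warning or BUGging, depending on config) on bad links. */
static bool my_list_add_valid(struct list_head *new, struct list_head *prev,
			      struct list_head *next)
{
	if (CHECK_DATA_CORRUPTION(next->prev != prev,
			"list_add corruption: next->prev should be prev (%p)\n", prev) ||
	    CHECK_DATA_CORRUPTION(new == prev || new == next,
			"list_add double add: new=%p, prev=%p, next=%p\n", new, prev, next))
		return false;

	return true;
}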
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 03a6653d329a..2ea0c282f3dc 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -22,7 +22,6 @@ struct ceph_osd_client;
* completion callback for async writepages
*/
typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
-typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool);
#define CEPH_HOMELESS_OSD -1
@@ -170,15 +169,12 @@ struct ceph_osd_request {
unsigned int r_num_ops;
int r_result;
- bool r_got_reply;
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
- struct completion r_completion;
- struct completion r_done_completion; /* fsync waiter */
+ struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
- ceph_osdc_unsafe_callback_t r_unsafe_callback;
struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index 9a9041784dcf..938656f70807 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -57,7 +57,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
case CEPH_POOL_TYPE_EC:
return false;
default:
- BUG_ON(1);
+ BUG();
}
}
@@ -82,13 +82,6 @@ void ceph_oloc_copy(struct ceph_object_locator *dest,
void ceph_oloc_destroy(struct ceph_object_locator *oloc);
/*
- * Maximum supported by kernel client object name length
- *
- * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100)
- */
-#define CEPH_MAX_OID_NAME_LEN 100
-
-/*
* 51-char inline_name is long enough for all cephfs and all but one
* rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
* arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all
@@ -173,8 +166,8 @@ struct ceph_osdmap {
* the list of osds that store+replicate them. */
struct crush_map *crush;
- struct mutex crush_scratch_mutex;
- int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
+ struct mutex crush_workspace_mutex;
+ void *crush_workspace;
};
static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 5c0da61cb763..5d0018782d50 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -50,7 +50,7 @@ struct ceph_timespec {
#define CEPH_PG_LAYOUT_LINEAR 2
#define CEPH_PG_LAYOUT_HYBRID 3
-#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */
+#define CEPH_PG_MAX_SIZE 32 /* max # osds in a single pg */
/*
* placement group.
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 861b4677fc5b..3c02404cfce9 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -148,14 +148,18 @@ struct cgroup_subsys_state {
* set for a task.
*/
struct css_set {
- /* Reference count */
- atomic_t refcount;
-
/*
- * List running through all cgroup groups in the same hash
- * slot. Protected by css_set_lock
+ * Set of subsystem states, one for each subsystem. This array is
+ * immutable after creation apart from the init_css_set during
+ * subsystem registration (at boot time).
*/
- struct hlist_node hlist;
+ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+ /* reference count */
+ atomic_t refcount;
+
+ /* the default cgroup associated with this css_set */
+ struct cgroup *dfl_cgrp;
/*
* Lists running through all tasks using this cgroup group.
@@ -167,21 +171,29 @@ struct css_set {
struct list_head tasks;
struct list_head mg_tasks;
+ /* all css_task_iters currently walking this cset */
+ struct list_head task_iters;
+
/*
- * List of cgrp_cset_links pointing at cgroups referenced from this
- * css_set. Protected by css_set_lock.
+ * On the default hierarhcy, ->subsys[ssid] may point to a css
+ * attached to an ancestor instead of the cgroup this css_set is
+ * associated with. The following node is anchored at
+ * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
+ * iterate through all css's attached to a given cgroup.
*/
- struct list_head cgrp_links;
+ struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
- /* the default cgroup associated with this css_set */
- struct cgroup *dfl_cgrp;
+ /*
+ * List running through all cgroup groups in the same hash
+ * slot. Protected by css_set_lock
+ */
+ struct hlist_node hlist;
/*
- * Set of subsystem states, one for each subsystem. This array is
- * immutable after creation apart from the init_css_set during
- * subsystem registration (at boot time).
+ * List of cgrp_cset_links pointing at cgroups referenced from this
+ * css_set. Protected by css_set_lock.
*/
- struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+ struct list_head cgrp_links;
/*
* List of csets participating in the on-going migration either as
@@ -201,18 +213,6 @@ struct css_set {
struct cgroup *mg_dst_cgrp;
struct css_set *mg_dst_cset;
- /*
- * On the default hierarhcy, ->subsys[ssid] may point to a css
- * attached to an ancestor instead of the cgroup this css_set is
- * associated with. The following node is anchored at
- * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
- * iterate through all css's attached to a given cgroup.
- */
- struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
-
- /* all css_task_iters currently walking this cset */
- struct list_head task_iters;
-
/* dead and being drained, ignore for migration */
bool dead;
@@ -388,6 +388,9 @@ struct cftype {
struct list_head node; /* anchored at ss->cfts */
struct kernfs_ops *kf_ops;
+ int (*open)(struct kernfs_open_file *of);
+ void (*release)(struct kernfs_open_file *of);
+
/*
* read_u64() is a shortcut for the common case of returning a
* single integer. Use it in place of read()
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c83c23f0577b..f6b43fbb141c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -266,7 +266,7 @@ void css_task_iter_end(struct css_task_iter *it);
* cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
* @leader: the loop cursor
* @dst_css: the destination css
- * @tset: takset to iterate
+ * @tset: taskset to iterate
*
* Iterate threadgroup leaders of @tset. For single-task migrations, @tset
* may not contain any.
diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h
new file mode 100644
index 000000000000..e94290b29e99
--- /dev/null
+++ b/include/linux/cgroup_rdma.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Parav Pandit <pandit.parav@gmail.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License. See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#ifndef _CGROUP_RDMA_H
+#define _CGROUP_RDMA_H
+
+#include <linux/cgroup.h>
+
+enum rdmacg_resource_type {
+ RDMACG_RESOURCE_HCA_HANDLE,
+ RDMACG_RESOURCE_HCA_OBJECT,
+ RDMACG_RESOURCE_MAX,
+};
+
+#ifdef CONFIG_CGROUP_RDMA
+
+struct rdma_cgroup {
+ struct cgroup_subsys_state css;
+
+ /*
+ * head to keep track of all resource pools
+ * that belongs to this cgroup.
+ */
+ struct list_head rpools;
+};
+
+struct rdmacg_device {
+ struct list_head dev_node;
+ struct list_head rpools;
+ char *name;
+};
+
+/*
+ * APIs for RDMA/IB stack to publish when a device wants to
+ * participate in resource accounting
+ */
+int rdmacg_register_device(struct rdmacg_device *device);
+void rdmacg_unregister_device(struct rdmacg_device *device);
+
+/* APIs for RDMA/IB stack to charge/uncharge pool specific resources */
+int rdmacg_try_charge(struct rdma_cgroup **rdmacg,
+ struct rdmacg_device *device,
+ enum rdmacg_resource_type index);
+void rdmacg_uncharge(struct rdma_cgroup *cg,
+ struct rdmacg_device *device,
+ enum rdmacg_resource_type index);
+#endif /* CONFIG_CGROUP_RDMA */
+#endif /* _CGROUP_RDMA_H */
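A rough sketch of how an IB/RDMA driver might use the new controller hooks. The device name, function name, and teardown timing are assumptions for illustration; only the API calls come from the header above. The device would be published once with rdmacg_register_device() at probe time and removed with rdmacg_unregister_device() on teardown.

#include <linux/cgroup_rdma.h>

static struct rdmacg_device example_rdmacg_dev = {
	.name = "example_hca0",		/* hypothetical device name */
};

static int example_alloc_hca_object(void)
{
	struct rdma_cgroup *cg;
	int ret;

	/* charge one HCA object against the calling task's rdma cgroup */
	ret = rdmacg_try_charge(&cg, &example_rdmacg_dev,
				RDMACG_RESOURCE_HCA_OBJECT);
	if (ret)
		return ret;

	/* ... create and use the object; on destroy, return the charge ... */
	rdmacg_uncharge(cg, &example_rdmacg_dev, RDMACG_RESOURCE_HCA_OBJECT);
	return 0;
}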
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 0df0336acee9..d0e597c44585 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -56,6 +56,10 @@ SUBSYS(hugetlb)
SUBSYS(pids)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_RDMA)
+SUBSYS(rdma)
+#endif
+
/*
* The following subsystems are not supported on the default hierarchy.
*/
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 6f0a91b37f68..03f32d0bd1d8 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -29,6 +29,7 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
struct cma **res_cma);
-extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
+extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
+ gfp_t gfp_mask);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif
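For example (a trivial wrapper, shown only to make the extra parameter concrete), a caller that used to rely on the implicit GFP_KERNEL behaviour now passes the mask explicitly:

#include <linux/cma.h>
#include <linux/gfp.h>

static struct page *grab_cma_pages(struct cma *cma, size_t nr_pages)
{
	/* order-0 alignment; allow sleeping and reclaim during the allocation */
	return cma_alloc(cma, nr_pages, 0, GFP_KERNEL);
}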
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 9e40be522793..aef47be2a5c1 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -711,8 +711,10 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long);
compat_stack_t __user *__uss = uss; \
struct task_struct *t = current; \
put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
- put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+ put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+ if (t->sas_ss_flags & SS_AUTODISARM) \
+ sas_ss_reset(t); \
} while (0);
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index fddd1a5eb322..76e28c229805 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -122,6 +122,7 @@
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
+#define __mode(x) __attribute__((mode(x)))
/* gcc version specific checks */
@@ -196,6 +197,17 @@
#endif
#endif
+#ifdef CONFIG_STACK_VALIDATION
+#define annotate_unreachable() ({ \
+ asm("%c0:\t\n" \
+ ".pushsection __unreachable, \"a\"\t\n" \
+ ".long %c0b\t\n" \
+ ".popsection\t\n" : : "i" (__LINE__)); \
+})
+#else
+#define annotate_unreachable()
+#endif
+
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
@@ -205,7 +217,8 @@
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
-#define unreachable() __builtin_unreachable()
+#define unreachable() \
+ do { annotate_unreachable(); __builtin_unreachable(); } while (0)
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 91c30cba984e..f8110051188f 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -105,29 +105,36 @@ struct ftrace_branch_data {
};
};
+struct ftrace_likely_data {
+ struct ftrace_branch_data data;
+ unsigned long constant;
+};
+
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
* to disable branch tracing on a per file basis.
*/
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
&& !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
-void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ int expect, int is_constant);
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
-#define __branch_check__(x, expect) ({ \
+#define __branch_check__(x, expect, is_constant) ({ \
int ______r; \
- static struct ftrace_branch_data \
+ static struct ftrace_likely_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_annotated_branch"))) \
______f = { \
- .func = __func__, \
- .file = __FILE__, \
- .line = __LINE__, \
+ .data.func = __func__, \
+ .data.file = __FILE__, \
+ .data.line = __LINE__, \
}; \
- ______r = likely_notrace(x); \
- ftrace_likely_update(&______f, ______r, expect); \
+ ______r = __builtin_expect(!!(x), expect); \
+ ftrace_likely_update(&______f, ______r, \
+ expect, is_constant); \
______r; \
})
@@ -137,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
* written by Daniel Walker.
*/
# ifndef likely
-# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
-# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
@@ -570,12 +577,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
(_________p1); \
})
-/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
-#ifdef CONFIG_KPROBES
-# define __kprobes __attribute__((__section__(".kprobes.text")))
-# define nokprobe_inline __always_inline
-#else
-# define __kprobes
-# define nokprobe_inline inline
-#endif
#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
index be8f12b8f195..fbecbd089d75 100644
--- a/include/linux/crush/crush.h
+++ b/include/linux/crush/crush.h
@@ -135,13 +135,6 @@ struct crush_bucket {
__u32 size; /* num items */
__s32 *items;
- /*
- * cached random permutation: used for uniform bucket and for
- * the linear search fallback for the other bucket types.
- */
- __u32 perm_x; /* @x for which *perm is defined */
- __u32 perm_n; /* num elements of *perm that are permuted/defined */
- __u32 *perm;
};
struct crush_bucket_uniform {
@@ -211,6 +204,21 @@ struct crush_map {
* device fails. */
__u8 chooseleaf_stable;
+ /*
+ * This value is calculated after decode or construction by
+ * the builder. It is exposed here (rather than having a
+ * 'build CRUSH working space' function) so that callers can
+ * reserve a static buffer, allocate space on the stack, or
+ * otherwise avoid calling into the heap allocator if they
+ * want to. The size of the working space depends on the map,
+ * while the size of the scratch vector passed to the mapper
+ * depends on the size of the desired result set.
+ *
+ * Nothing stops the caller from allocating both in one swell
+ * foop and passing in two points, though.
+ */
+ size_t working_size;
+
#ifndef __KERNEL__
/*
* version 0 (original) of straw_calc has various flaws. version 1
@@ -248,4 +256,23 @@ static inline int crush_calc_tree_node(int i)
return ((i+1) << 1)-1;
}
+/*
+ * These data structures are private to the CRUSH implementation. They
+ * are exposed in this header file because builder needs their
+ * definitions to calculate the total working size.
+ *
+ * Moving this out of the crush map allow us to treat the CRUSH map as
+ * immutable within the mapper and removes the requirement for a CRUSH
+ * map lock.
+ */
+struct crush_work_bucket {
+ __u32 perm_x; /* @x for which *perm is defined */
+ __u32 perm_n; /* num elements of *perm that are permuted/defined */
+ __u32 *perm; /* Permutation of the bucket's items */
+};
+
+struct crush_work {
+ struct crush_work_bucket **work; /* Per-bucket working store */
+};
+
#endif
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
index 5dfd5b1125d2..c95e19e1ff11 100644
--- a/include/linux/crush/mapper.h
+++ b/include/linux/crush/mapper.h
@@ -15,6 +15,20 @@ extern int crush_do_rule(const struct crush_map *map,
int ruleno,
int x, int *result, int result_max,
const __u32 *weights, int weight_max,
- int *scratch);
+ void *cwin);
+
+/*
+ * Returns the exact amount of workspace that will need to be used
+ * for a given combination of crush_map and result_max. The caller can
+ * then allocate this much on its own, either on the stack, in a
+ * per-thread long-lived buffer, or however it likes.
+ */
+static inline size_t crush_work_size(const struct crush_map *map,
+ int result_max)
+{
+ return map->working_size + result_max * 3 * sizeof(__u32);
+}
+
+void crush_init_workspace(const struct crush_map *map, void *v);
#endif
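A minimal sketch of the new calling convention, assuming the caller may sleep (the mapping function below is hypothetical): the workspace is sized with crush_work_size(), initialized once with crush_init_workspace(), and handed to crush_do_rule() as the cwin argument that replaces the old scratch vector.

#include <linux/slab.h>
#include <linux/crush/crush.h>
#include <linux/crush/mapper.h>

static int example_map_pg(const struct crush_map *map, int ruleno, int x,
			  int *osds, int result_max,
			  const __u32 *weights, int weight_max)
{
	void *work = kmalloc(crush_work_size(map, result_max), GFP_NOIO);
	int len;

	if (!work)
		return -ENOMEM;

	crush_init_workspace(map, work);
	len = crush_do_rule(map, ruleno, x, osds, result_max,
			    weights, weight_max, work);
	kfree(work);
	return len;	/* number of OSDs placed in @osds, or an error */
}

A long-lived caller could equally keep one workspace per osdmap and reuse it under a lock, which is what the crush_workspace field added to struct ceph_osdmap above is for.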
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 1e77ff5818f1..d8a3dc042e1c 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -38,8 +38,8 @@ static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- const struct iomap_ops *ops);
+int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+ const struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
@@ -71,19 +71,13 @@ static inline unsigned int dax_radix_order(void *entry)
return PMD_SHIFT - PAGE_SHIFT;
return 0;
}
-int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops);
#else
static inline unsigned int dax_radix_order(void *entry)
{
return 0;
}
-static inline int dax_iomap_pmd_fault(struct vm_fault *vmf,
- const struct iomap_ops *ops)
-{
- return VM_FAULT_FALLBACK;
-}
#endif
-int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+int dax_pfn_mkwrite(struct vm_fault *vmf);
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index c965e4469499..591b6c16f9c1 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -562,7 +562,7 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
* @inode: inode to select the dentry from multiple layers (can be NULL)
* @flags: open flags to control copy-up behavior
*
- * If dentry is on an union/overlay, then return the underlying, real dentry.
+ * If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
*
* See also: Documentation/filesystems/vfs.txt
@@ -581,7 +581,7 @@ static inline struct dentry *d_real(struct dentry *dentry,
* d_real_inode - Return the real inode
* @dentry: The dentry to query
*
- * If dentry is on an union/overlay, then return the underlying, real inode.
+ * If dentry is on a union/overlay, then return the underlying, real inode.
* Otherwise return d_inode().
*/
static inline struct inode *d_real_inode(const struct dentry *dentry)
diff --git a/include/linux/device.h b/include/linux/device.h
index bd684fc8ec1d..30c4570e928d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -925,6 +925,7 @@ struct device {
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
#endif
+ const struct dma_map_ops *dma_ops;
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
@@ -1139,6 +1140,7 @@ static inline bool device_supports_offline(struct device *dev)
extern void lock_device_hotplug(void);
extern void unlock_device_hotplug(void);
extern int lock_device_hotplug_sysfs(void);
+void assert_held_device_hotplug(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index fec734df1524..b67bf6ac907d 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
}
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order);
+ unsigned int order, gfp_t gfp_mask);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count);
@@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
static inline
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order)
+ unsigned int order, gfp_t gfp_mask)
{
return NULL;
}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index c24721a33b4c..0977317c6835 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -134,7 +134,8 @@ struct dma_map_ops {
int is_phys;
};
-extern struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
+ return get_arch_dma_ops(dev ? dev->bus : NULL);
+}
+
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->dma_ops = dma_ops;
+}
#else
/*
* Define the dma api to allow compilation but not linking of
* dma dependent code. Code that depends on the dma-mapping
* API needs to set 'depends on HAS_DMA' in its Kconfig
*/
-extern struct dma_map_ops bad_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+extern const struct dma_map_ops bad_dma_ops;
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &bad_dma_ops;
}
@@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(ptr, size);
@@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
@@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents;
struct scatterlist *s;
@@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
@@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(page_address(page) + offset, size);
@@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
@@ -286,7 +299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
@@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_resource)
@@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_cpu)
@@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_device)
@@ -371,7 +384,7 @@ static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_cpu)
@@ -383,7 +396,7 @@ static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_device)
@@ -428,7 +441,7 @@ static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
if (ops->mmap)
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
if (ops->get_sgtable)
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
@@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
@@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
WARN_ON(irqs_disabled());
@@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
if (!ops)
return 0;
@@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
if (ops->set_dma_mask)
return ops->set_dma_mask(dev, mask);
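As an illustration of the new per-device hook (the setup function is hypothetical), a bus or IOMMU driver can install its own dma_map_ops on a single device instead of overriding the architecture-wide table:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static void example_bus_setup_dma(struct device *dev,
				  const struct dma_map_ops *bus_ops)
{
	/* From here on, get_dma_ops(dev) returns @bus_ops rather than the
	 * architecture default from get_arch_dma_ops(). */
	set_dma_ops(dev, bus_ops);
}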
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c930cbc19342..c64f2cb7d364 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -655,6 +655,11 @@ struct inode {
void *i_private; /* fs or device private pointer */
};
+static inline unsigned int i_blocksize(const struct inode *node)
+{
+ return (1 << node->i_blkbits);
+}
+
static inline int inode_unhashed(struct inode *inode)
{
return hlist_unhashed(&inode->i_hash);
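A small sketch (the helper below is hypothetical) of the open-coded shift the new i_blocksize() accessor replaces:

#include <linux/fs.h>
#include <linux/kernel.h>

static loff_t round_len_to_block(const struct inode *inode, loff_t len)
{
	/* previously spelled: 1 << inode->i_blkbits */
	return round_up(len, i_blocksize(inode));
}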
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h
index a1e8277120c7..c46eab5bc893 100644
--- a/include/linux/fsl-diu-fb.h
+++ b/include/linux/fsl-diu-fb.h
@@ -73,7 +73,7 @@ struct diu_ad {
/* Word 0(32-bit) in DDR memory */
/* __u16 comp; */
/* __u16 pixel_s:2; */
-/* __u16 pallete:1; */
+/* __u16 palette:1; */
/* __u16 red_c:2; */
/* __u16 green_c:2; */
/* __u16 blue_c:2; */
@@ -142,7 +142,7 @@ struct diu_ad {
struct diu {
__be32 desc[3];
__be32 gamma;
- __be32 pallete;
+ __be32 palette;
__be32 cursor;
__be32 curs_pos;
__be32 diu_mode;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0fe0b6295ab5..db373b9d3223 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -541,7 +541,7 @@ static inline bool pm_suspended_storage(void)
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
- unsigned migratetype);
+ unsigned migratetype, gfp_t gfp_mask);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f0029e786205..a3762d49ba39 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -6,6 +6,18 @@ extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+ struct vm_area_struct *vma);
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+#else
+static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
+{
+}
+#endif
+
extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
@@ -17,6 +29,9 @@ extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
extern int zap_huge_pmd(struct mmu_gather *tlb,
struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr);
+extern int zap_huge_pud(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec);
@@ -26,8 +41,10 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
-int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
- pfn_t pfn, bool write);
+int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, pfn_t pfn, bool write);
+int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
@@ -58,13 +75,14 @@ extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags);
-
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
+#define HPAGE_PUD_SHIFT PUD_SHIFT
+#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
+#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
+
extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
#define transparent_hugepage_enabled(__vma) \
@@ -118,6 +136,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
bool freeze, struct page *page);
+void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long address);
+
+#define split_huge_pud(__vma, __pud, __address) \
+ do { \
+ pud_t *____pud = (__pud); \
+ if (pud_trans_huge(*____pud) \
+ || pud_devmap(*____pud)) \
+ __split_huge_pud(__vma, __pud, __address); \
+ } while (0)
+
extern int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
@@ -126,6 +155,8 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma);
+extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
+ struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
@@ -136,6 +167,15 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
else
return NULL;
}
+static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
+ struct vm_area_struct *vma)
+{
+ VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
+ if (pud_trans_huge(*pud) || pud_devmap(*pud))
+ return __pud_trans_huge_lock(pud, vma);
+ else
+ return NULL;
+}
static inline int hpage_nr_pages(struct page *page)
{
if (unlikely(PageTransHuge(page)))
@@ -143,6 +183,11 @@ static inline int hpage_nr_pages(struct page *page)
return 1;
}
+struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, int flags);
+struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t *pud, int flags);
+
extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
@@ -157,6 +202,11 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
return is_huge_zero_page(pmd_page(pmd));
}
+static inline bool is_huge_zero_pud(pud_t pud)
+{
+ return false;
+}
+
struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
@@ -167,6 +217,10 @@ void mm_put_huge_zero_page(struct mm_struct *mm);
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
+#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
+
#define hpage_nr_pages(x) 1
#define transparent_hugepage_enabled(__vma) 0
@@ -195,6 +249,9 @@ static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
unsigned long address, bool freeze, struct page *page) {}
+#define split_huge_pud(__vma, __pmd, __address) \
+ do { } while (0)
+
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
@@ -212,6 +269,11 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
{
return NULL;
}
+static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
+ struct vm_area_struct *vma)
+{
+ return NULL;
+}
static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
@@ -223,6 +285,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline bool is_huge_zero_pud(pud_t pud)
+{
+ return false;
+}
+
static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
return;
@@ -233,6 +300,12 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
{
return NULL;
}
+
+static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
+ unsigned long addr, pud_t *pud, int flags)
+{
+ return NULL;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */
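A sketch of the PUD-level locking pattern the new helpers enable, mirroring the existing PMD idiom. The walker function is hypothetical, and mmap_sem is assumed to be held on entry, as the lock helper requires.

#include <linux/mm.h>
#include <linux/huge_mm.h>

/* Returns 1 if the PUD maps a huge (THP or devmap) area, 0 otherwise. */
static int example_probe_huge_pud(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl = pud_trans_huge_lock(pud, vma);

	if (ptl) {
		/* *pud is a huge entry and is stable while ptl is held */
		spin_unlock(ptl);
		return 1;
	}
	/* not huge: fall back to walking the PMD level as before */
	return 0;
}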
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 7b23a3316dcb..bed8fbb45f31 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -283,6 +283,7 @@ enum i2c_slave_event {
extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
extern int i2c_slave_unregister(struct i2c_client *client);
+extern bool i2c_detect_slave_mode(struct device *dev);
static inline int i2c_slave_event(struct i2c_client *client,
enum i2c_slave_event event, u8 *val)
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 3c01b89aed67..bf70b3ef0a07 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -12,47 +12,29 @@
#ifndef __IDR_H__
#define __IDR_H__
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/rcupdate.h>
+#include <linux/radix-tree.h>
+#include <linux/gfp.h>
+#include <linux/percpu.h>
+
+struct idr {
+ struct radix_tree_root idr_rt;
+ unsigned int idr_next;
+};
/*
- * Using 6 bits at each layer allows us to allocate 7 layers out of each page.
- * 8 bits only gave us 3 layers out of every pair of pages, which is less
- * efficient except for trees with a largest element between 192-255 inclusive.
+ * The IDR API does not expose the tagging functionality of the radix tree
+ * to users. Use tag 0 to track whether a node has free space below it.
*/
-#define IDR_BITS 6
-#define IDR_SIZE (1 << IDR_BITS)
-#define IDR_MASK ((1 << IDR_BITS)-1)
-
-struct idr_layer {
- int prefix; /* the ID prefix of this idr_layer */
- int layer; /* distance from leaf */
- struct idr_layer __rcu *ary[1<<IDR_BITS];
- int count; /* When zero, we can release it */
- union {
- /* A zero bit means "space here" */
- DECLARE_BITMAP(bitmap, IDR_SIZE);
- struct rcu_head rcu_head;
- };
-};
+#define IDR_FREE 0
-struct idr {
- struct idr_layer __rcu *hint; /* the last layer allocated from */
- struct idr_layer __rcu *top;
- int layers; /* only valid w/o concurrent changes */
- int cur; /* current pos for cyclic allocation */
- spinlock_t lock;
- int id_free_cnt;
- struct idr_layer *id_free;
-};
+/* Set the IDR flag and the IDR_FREE tag */
+#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
-#define IDR_INIT(name) \
+#define IDR_INIT \
{ \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
}
-#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
+#define DEFINE_IDR(name) struct idr name = IDR_INIT
/**
* idr_get_cursor - Return the current position of the cyclic allocator
@@ -62,9 +44,9 @@ struct idr {
* idr_alloc_cyclic() if it is free (otherwise the search will start from
* this position).
*/
-static inline unsigned int idr_get_cursor(struct idr *idr)
+static inline unsigned int idr_get_cursor(const struct idr *idr)
{
- return READ_ONCE(idr->cur);
+ return READ_ONCE(idr->idr_next);
}
/**
@@ -77,7 +59,7 @@ static inline unsigned int idr_get_cursor(struct idr *idr)
*/
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
- WRITE_ONCE(idr->cur, val);
+ WRITE_ONCE(idr->idr_next, val);
}
/**
@@ -97,22 +79,31 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
* period).
*/
-/*
- * This is what we export.
- */
-
-void *idr_find_slowpath(struct idr *idp, int id);
void idr_preload(gfp_t gfp_mask);
-int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
-int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
-int idr_for_each(struct idr *idp,
+int idr_alloc(struct idr *, void *entry, int start, int end, gfp_t);
+int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
+int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
-void *idr_get_next(struct idr *idp, int *nextid);
-void *idr_replace(struct idr *idp, void *ptr, int id);
-void idr_remove(struct idr *idp, int id);
-void idr_destroy(struct idr *idp);
-void idr_init(struct idr *idp);
-bool idr_is_empty(struct idr *idp);
+void *idr_get_next(struct idr *, int *nextid);
+void *idr_replace(struct idr *, void *, int id);
+void idr_destroy(struct idr *);
+
+static inline void *idr_remove(struct idr *idr, int id)
+{
+ return radix_tree_delete_item(&idr->idr_rt, id, NULL);
+}
+
+static inline void idr_init(struct idr *idr)
+{
+ INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
+ idr->idr_next = 0;
+}
+
+static inline bool idr_is_empty(const struct idr *idr)
+{
+ return radix_tree_empty(&idr->idr_rt) &&
+ radix_tree_tagged(&idr->idr_rt, IDR_FREE);
+}
/**
* idr_preload_end - end preload section started with idr_preload()
@@ -137,19 +128,14 @@ static inline void idr_preload_end(void)
* This function can be called under rcu_read_lock(), given that the leaf
* pointers lifetimes are correctly managed.
*/
-static inline void *idr_find(struct idr *idr, int id)
+static inline void *idr_find(const struct idr *idr, int id)
{
- struct idr_layer *hint = rcu_dereference_raw(idr->hint);
-
- if (hint && (id & ~IDR_MASK) == hint->prefix)
- return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
-
- return idr_find_slowpath(idr, id);
+ return radix_tree_lookup(&idr->idr_rt, id);
}
/**
* idr_for_each_entry - iterate over an idr's elements of a given type
- * @idp: idr handle
+ * @idr: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
*
@@ -157,57 +143,60 @@ static inline void *idr_find(struct idr *idr, int id)
* after normal terminatinon @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
-#define idr_for_each_entry(idp, entry, id) \
- for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
+#define idr_for_each_entry(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
/**
- * idr_for_each_entry - continue iteration over an idr's elements of a given type
- * @idp: idr handle
+ * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
+ * @idr: idr handle
* @entry: the type * to use as cursor
* @id: id entry's key
*
* Continue to iterate over list of given type, continuing after
* the current position.
*/
-#define idr_for_each_entry_continue(idp, entry, id) \
- for ((entry) = idr_get_next((idp), &(id)); \
+#define idr_for_each_entry_continue(idr, entry, id) \
+ for ((entry) = idr_get_next((idr), &(id)); \
entry; \
- ++id, (entry) = idr_get_next((idp), &(id)))
+ ++id, (entry) = idr_get_next((idr), &(id)))
/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.
- *
- * IDA_BITMAP_LONGS is calculated to be one less to accommodate
- * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
*/
#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
-#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1)
+#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
struct ida_bitmap {
- long nr_busy;
unsigned long bitmap[IDA_BITMAP_LONGS];
};
+DECLARE_PER_CPU(struct ida_bitmap *, ida_bitmap);
+
struct ida {
- struct idr idr;
- struct ida_bitmap *free_bitmap;
+ struct radix_tree_root ida_rt;
};
-#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
-#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
+#define IDA_INIT { \
+ .ida_rt = RADIX_TREE_INIT(IDR_RT_MARKER | GFP_NOWAIT), \
+}
+#define DEFINE_IDA(name) struct ida name = IDA_INIT
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
-void ida_init(struct ida *ida);
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
+static inline void ida_init(struct ida *ida)
+{
+ INIT_RADIX_TREE(&ida->ida_rt, IDR_RT_MARKER | GFP_NOWAIT);
+}
+
/**
* ida_get_new - allocate new ID
* @ida: idr handle
@@ -220,11 +209,8 @@ static inline int ida_get_new(struct ida *ida, int *p_id)
return ida_get_new_above(ida, 0, p_id);
}
-static inline bool ida_is_empty(struct ida *ida)
+static inline bool ida_is_empty(const struct ida *ida)
{
- return idr_is_empty(&ida->idr);
+ return radix_tree_empty(&ida->ida_rt);
}
-
-void __init idr_init_cache(void);
-
#endif /* __IDR_H__ */
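The allocation pattern is unchanged by the radix-tree rewrite; the visible differences for users are that idr_remove() now returns the removed pointer and IDR_INIT/IDA_INIT no longer take a name. A hedged sketch (identifiers below are made up; callers still need their own serialization around idr_alloc(), omitted here):

#include <linux/idr.h>

static DEFINE_IDR(example_idr);

static int example_register(void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	/* allocate the lowest free id >= 1, no upper bound */
	id = idr_alloc(&example_idr, obj, 1, 0, GFP_NOWAIT);
	idr_preload_end();

	return id;		/* negative errno on failure */
}

static void *example_unregister(int id)
{
	return idr_remove(&example_idr, id);	/* now hands back the pointer */
}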
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 891459caa278..7291810067eb 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -79,8 +79,7 @@ int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops);
int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
const struct iomap_ops *ops);
-int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
- const struct iomap_ops *ops);
+int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops);
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
loff_t start, loff_t len, const struct iomap_ops *ops);
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index 1c30014ed176..d29e1e21bf3f 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 78c5d5ae3857..f1045b2c6a00 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -100,7 +100,7 @@ struct ipmi_user_hndl {
/* Create a new user of the IPMI layer on the given interface number. */
int ipmi_create_user(unsigned int if_num,
- struct ipmi_user_hndl *handler,
+ const struct ipmi_user_hndl *handler,
void *handler_data,
ipmi_user_t *user);
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b63d6b7b0db0..8e06d758ee48 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -89,11 +89,17 @@ extern bool static_key_initialized;
struct static_key {
atomic_t enabled;
-/* Set lsb bit to 1 if branch is default true, 0 ot */
- struct jump_entry *entries;
-#ifdef CONFIG_MODULES
- struct static_key_mod *next;
-#endif
+/*
+ * bit 0 => 1 if key is initially true
+ * 0 if initially false
+ * bit 1 => 1 if points to struct static_key_mod
+ * 0 if points to struct jump_entry
+ */
+ union {
+ unsigned long type;
+ struct jump_entry *entries;
+ struct static_key_mod *next;
+ };
};
#else
@@ -118,9 +124,10 @@ struct module;
#ifdef HAVE_JUMP_LABEL
-#define JUMP_TYPE_FALSE 0UL
-#define JUMP_TYPE_TRUE 1UL
-#define JUMP_TYPE_MASK 1UL
+#define JUMP_TYPE_FALSE 0UL
+#define JUMP_TYPE_TRUE 1UL
+#define JUMP_TYPE_LINKED 2UL
+#define JUMP_TYPE_MASK 3UL
static __always_inline bool static_key_false(struct static_key *key)
{
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 820c0ad54a01..c908b25bf5a5 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -52,7 +52,7 @@ void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
unsigned long *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
-void kasan_cache_destroy(struct kmem_cache *cache);
+void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -98,7 +98,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
size_t *size,
unsigned long *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
-static inline void kasan_cache_destroy(struct kmem_cache *cache) {}
+static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 8f2e059e4d45..4d748603e818 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -8,7 +8,7 @@
/*
* The use of "&&" / "||" is limited in certain expressions.
- * The followings enable to calculate "and" / "or" with macro expansion only.
+ * The following enable to calculate "and" / "or" with macro expansion only.
*/
#define __and(x, y) ___and(x, y)
#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cb09238f6d32..4c26dc3a8295 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -100,16 +100,18 @@
)
/*
- * Divide positive or negative dividend by positive divisor and round
- * to closest integer. Result is undefined for negative divisors and
- * for negative dividends if the divisor variable type is unsigned.
+ * Divide positive or negative dividend by positive or negative divisor
+ * and round to closest integer. Result is undefined for negative
+ * divisors if he dividend variable type is unsigned and for negative
+ * dividends if the divisor variable type is unsigned.
*/
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
typeof(x) __x = x; \
typeof(divisor) __d = divisor; \
(((typeof(x))-1) > 0 || \
- ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
+ ((typeof(divisor))-1) > 0 || \
+ (((__x) > 0) == ((__d) > 0))) ? \
(((__x) + ((__d) / 2)) / (__d)) : \
(((__x) - ((__d) / 2)) / (__d)); \
} \
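A few worked values under the relaxed rule (plain C, for illustration only):

#include <linux/kernel.h>

static void div_round_closest_examples(void)
{
	int a = DIV_ROUND_CLOSEST(7, 2);	/* (7 + 1) / 2   ==  4 */
	int b = DIV_ROUND_CLOSEST(-7, 2);	/* (-7 - 1) / 2  == -4 */
	int c = DIV_ROUND_CLOSEST(7, -2);	/* (7 + 1) / -2  == -4, previously undefined */

	(void)a; (void)b; (void)c;
}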
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 7056238fd9f5..a9b11b8d06f2 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -46,6 +46,7 @@ enum kernfs_node_flag {
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
+ KERNFS_HAS_RELEASE = 0x2000,
};
/* @flags for kernfs_create_root() */
@@ -175,6 +176,7 @@ struct kernfs_open_file {
/* published fields */
struct kernfs_node *kn;
struct file *file;
+ struct seq_file *seq_file;
void *priv;
/* private fields, do not use outside kernfs proper */
@@ -185,12 +187,20 @@ struct kernfs_open_file {
char *prealloc_buf;
size_t atomic_write_len;
- bool mmapped;
+ bool mmapped:1;
+ bool released:1;
const struct vm_operations_struct *vm_ops;
};
struct kernfs_ops {
/*
+ * Optional open/release methods. Both are called with
+ * @of->seq_file populated.
+ */
+ int (*open)(struct kernfs_open_file *of);
+ void (*release)(struct kernfs_open_file *of);
+
+ /*
* Read is handled by either seq_file or raw_read().
*
* If seq_show() is present, seq_file path is active. Other seq
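A hedged sketch of a kernfs user wiring up the new optional callbacks (the state struct and names are invented for this example): state allocated in ->open() via of->priv stays valid for the lifetime of the open file and is torn down in ->release().

#include <linux/kernfs.h>
#include <linux/slab.h>

struct example_state { int open_count; };	/* hypothetical per-open state */

static int example_kf_open(struct kernfs_open_file *of)
{
	of->priv = kzalloc(sizeof(struct example_state), GFP_KERNEL);
	return of->priv ? 0 : -ENOMEM;
}

static void example_kf_release(struct kernfs_open_file *of)
{
	kfree(of->priv);
}

static const struct kernfs_ops example_kf_ops = {
	.open		= example_kf_open,
	.release	= example_kf_release,
	/* .seq_show / .write etc. as before */
};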
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 16ddfb8b304a..c328e4f7dcad 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -29,7 +29,7 @@
* <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
* <prasanna@in.ibm.com> added function-return probes.
*/
-#include <linux/compiler.h> /* for __kprobes */
+#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/notifier.h>
@@ -40,9 +40,9 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
+#include <asm/kprobes.h>
#ifdef CONFIG_KPROBES
-#include <asm/kprobes.h>
/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE 0x00000001
@@ -51,6 +51,7 @@
#define KPROBE_HIT_SSDONE 0x00000008
#else /* CONFIG_KPROBES */
+#include <asm-generic/kprobes.h>
typedef int kprobe_opcode_t;
struct arch_specific_insn {
int dummy;
@@ -509,18 +510,4 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
}
#endif
-#ifdef CONFIG_KPROBES
-/*
- * Blacklist ganerating macro. Specify functions which is not probed
- * by using this macro.
- */
-#define __NOKPROBE_SYMBOL(fname) \
-static unsigned long __used \
- __attribute__((section("_kprobe_blacklist"))) \
- _kbl_addr_##fname = (unsigned long)fname;
-#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
-#else
-#define NOKPROBE_SYMBOL(fname)
-#endif
-
#endif /* _LINUX_KPROBES_H */
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index 6b784c59f321..394e3d9213b8 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -1,87 +1,648 @@
-#ifndef __LZ4_H__
-#define __LZ4_H__
-/*
- * LZ4 Kernel Interface
+/* LZ4 Kernel Interface
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * This file is based on the original header file
+ * for LZ4 - Fast LZ compression algorithm.
+ *
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*/
-#define LZ4_MEM_COMPRESS (16384)
-#define LZ4HC_MEM_COMPRESS (262144 + (2 * sizeof(unsigned char *)))
+#ifndef __LZ4_H__
+#define __LZ4_H__
+
+#include <linux/types.h>
+#include <linux/string.h> /* memset, memcpy */
+
+/*-************************************************************************
+ * CONSTANTS
+ **************************************************************************/
/*
- * lz4_compressbound()
- * Provides the maximum size that LZ4 may output in a "worst case" scenario
- * (input data not compressible)
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes
+ * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
*/
-static inline size_t lz4_compressbound(size_t isize)
-{
- return isize + (isize / 255) + 16;
-}
+#define LZ4_MEMORY_USAGE 14
+
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) (\
+ (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \
+ ? 0 \
+ : (isize) + ((isize)/255) + 16)
+
+#define LZ4_ACCELERATION_DEFAULT 1
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)
+
+#define LZ4HC_MIN_CLEVEL 3
+#define LZ4HC_DEFAULT_CLEVEL 9
+#define LZ4HC_MAX_CLEVEL 16
+
+#define LZ4HC_DICTIONARY_LOGSIZE 16
+#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
+#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
+#define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1)
+#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
+#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
+
+/*-************************************************************************
+ * STREAMING CONSTANTS AND STRUCTURES
+ **************************************************************************/
+#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4)
+#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
+
+#define LZ4_STREAMHCSIZE 262192
+#define LZ4_STREAMHCSIZE_SIZET (262192 / sizeof(size_t))
+
+#define LZ4_STREAMDECODESIZE_U64 4
+#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * \
+ sizeof(unsigned long long))
/*
- * lz4_compress()
- * src : source address of the original data
- * src_len : size of the original data
- * dst : output buffer address of the compressed data
- * This requires 'dst' of size LZ4_COMPRESSBOUND.
- * dst_len : is the output size, which is returned after compress done
- * workmem : address of the working memory.
- * This requires 'workmem' of size LZ4_MEM_COMPRESS.
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer and workmem must be already allocated with
- * the defined size.
- */
-int lz4_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem);
-
- /*
- * lz4hc_compress()
- * src : source address of the original data
- * src_len : size of the original data
- * dst : output buffer address of the compressed data
- * This requires 'dst' of size LZ4_COMPRESSBOUND.
- * dst_len : is the output size, which is returned after compress done
- * workmem : address of the working memory.
- * This requires 'workmem' of size LZ4HC_MEM_COMPRESS.
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer and workmem must be already allocated with
- * the defined size.
- */
-int lz4hc_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem);
+ * LZ4_stream_t - information structure to track an LZ4 stream.
+ */
+typedef struct {
+ uint32_t hashTable[LZ4_HASH_SIZE_U32];
+ uint32_t currentOffset;
+ uint32_t initCheck;
+ const uint8_t *dictionary;
+ uint8_t *bufferStart;
+ uint32_t dictSize;
+} LZ4_stream_t_internal;
+typedef union {
+ unsigned long long table[LZ4_STREAMSIZE_U64];
+ LZ4_stream_t_internal internal_donotuse;
+} LZ4_stream_t;
/*
- * lz4_decompress()
- * src : source address of the compressed data
- * src_len : is the input size, whcih is returned after decompress done
- * dest : output buffer address of the decompressed data
- * actual_dest_len: is the size of uncompressed data, supposing it's known
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer must be already allocated.
- * slightly faster than lz4_decompress_unknownoutputsize()
- */
-int lz4_decompress(const unsigned char *src, size_t *src_len,
- unsigned char *dest, size_t actual_dest_len);
+ * LZ4_streamHC_t - information structure to track an LZ4HC stream.
+ */
+typedef struct {
+ unsigned int hashTable[LZ4HC_HASHTABLESIZE];
+ unsigned short chainTable[LZ4HC_MAXD];
+ /* next block to continue on current prefix */
+ const unsigned char *end;
+ /* All index relative to this position */
+ const unsigned char *base;
+ /* alternate base for extDict */
+ const unsigned char *dictBase;
+ /* below that point, need extDict */
+ unsigned int dictLimit;
+ /* below that point, no more dict */
+ unsigned int lowLimit;
+ /* index from which to continue dict update */
+ unsigned int nextToUpdate;
+ unsigned int compressionLevel;
+} LZ4HC_CCtx_internal;
+typedef union {
+ size_t table[LZ4_STREAMHCSIZE_SIZET];
+ LZ4HC_CCtx_internal internal_donotuse;
+} LZ4_streamHC_t;
/*
- * lz4_decompress_unknownoutputsize()
- * src : source address of the compressed data
- * src_len : is the input size, therefore the compressed size
- * dest : output buffer address of the decompressed data
- * dest_len: is the max size of the destination buffer, which is
- * returned with actual size of decompressed data after
- * decompress done
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer must be already allocated.
- */
-int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
- unsigned char *dest, size_t *dest_len);
+ * LZ4_streamDecode_t - information structure to track an
+ * LZ4 stream during decompression.
+ *
+ * init this structure using LZ4_setStreamDecode (or memset()) before first use
+ */
+typedef struct {
+ const uint8_t *externalDict;
+ size_t extDictSize;
+ const uint8_t *prefixEnd;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+typedef union {
+ unsigned long long table[LZ4_STREAMDECODESIZE_U64];
+ LZ4_streamDecode_t_internal internal_donotuse;
+} LZ4_streamDecode_t;
+
+/*-************************************************************************
+ * SIZE OF STATE
+ **************************************************************************/
+#define LZ4_MEM_COMPRESS LZ4_STREAMSIZE
+#define LZ4HC_MEM_COMPRESS LZ4_STREAMHCSIZE
+
+/*-************************************************************************
+ * Compression Functions
+ **************************************************************************/
+
+/**
+ * LZ4_compressBound() - Max. output size in worst case scenarios
+ * @isize: Size of the input data
+ *
+ * Return: Max. size LZ4 may output in a "worst case" scenario
+ * (data not compressible)
+ */
+static inline int LZ4_compressBound(size_t isize)
+{
+ return LZ4_COMPRESSBOUND(isize);
+}
+
+/**
+ * LZ4_compress_default() - Compress data from source to dest
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxOutputSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Compresses 'sourceSize' bytes from buffer 'source'
+ * into already allocated 'dest' buffer of size 'maxOutputSize'.
+ * Compression is guaranteed to succeed if
+ * 'maxOutputSize' >= LZ4_compressBound(inputSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'source' into a more limited 'dest' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * As a consequence, 'dest' content is not valid.
+ *
+ * Return: Number of bytes written into buffer 'dest'
+ * (necessarily <= maxOutputSize) or 0 if compression fails
+ */
+int LZ4_compress_default(const char *source, char *dest, int inputSize,
+ int maxOutputSize, void *wrkmem);
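A minimal usage sketch for the one-shot compressor (editor's example, not part of the patch; the wrapper name, the vmalloc()-backed work area and the error codes are assumptions):

static int example_lz4_compress(const char *src, int src_len,
				char *dst, int dst_capacity)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);	/* needs <linux/vmalloc.h> */
	int out_len;

	if (!wrkmem)
		return -ENOMEM;
	/* dst_capacity >= LZ4_compressBound(src_len) guarantees success */
	out_len = LZ4_compress_default(src, dst, src_len, dst_capacity, wrkmem);
	vfree(wrkmem);
	return out_len ? out_len : -EINVAL;	/* 0 means the data did not fit */
}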
+
+/**
+ * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxOutputSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @acceleration: acceleration factor
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Same as LZ4_compress_default(), but allows to select an "acceleration"
+ * factor. The larger the acceleration value, the faster the algorithm,
+ * but also the lesser the compression. It's a trade-off. It can be fine tuned,
+ * with each successive value providing roughly +~3% to speed.
+ * An acceleration value of "1" is the same as regular LZ4_compress_default()
+ * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1.
+ *
+ * Return: Number of bytes written into buffer 'dest'
+ * (necessarily <= maxOutputSize) or 0 if compression fails
+ */
+int LZ4_compress_fast(const char *source, char *dest, int inputSize,
+ int maxOutputSize, int acceleration, void *wrkmem);
+
+/**
+ * LZ4_compress_destSize() - Compress as much data as possible
+ * from source to dest
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @sourceSizePtr: will be modified to indicate how many bytes were read
+ * from 'source' to fill 'dest'. New value is necessarily <= old value.
+ * @targetDestSize: Size of buffer 'dest' which must be already allocated
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ *
+ * Reverses the logic by compressing as much data as possible
+ * from 'source' buffer into already allocated buffer 'dest'
+ * of size 'targetDestSize'.
+ * This function either compresses the entire 'source' content into 'dest'
+ * if it's large enough, or fills 'dest' buffer completely with as much data as
+ * possible from 'source'.
+ *
+ * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize)
+ * or 0 if compression fails
+ */
+int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr,
+ int targetDestSize, void *wrkmem);
+
+/*-************************************************************************
+ * Decompression Functions
+ **************************************************************************/
+
+/**
+ * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest'
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * Decompresses data from 'source' into 'dest'.
+ * This function fully respect memory boundaries for properly formed
+ * compressed data.
+ * It is a bit faster than LZ4_decompress_safe().
+ * However, it does not provide any protection against intentionally
+ * modified data stream (malicious input).
+ * Use this function in trusted environment only
+ * (data to decode comes from a trusted source).
+ *
+ * Return: number of bytes read from the source buffer
+ * or a negative result if decompression fails.
+ */
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
+
+/**
+ * LZ4_decompress_safe() - Decompression protected against buffer overflow
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * Decompresses data from 'source' into 'dest'.
+ * If the source stream is detected malformed, the function will
+ * stop decoding and return a negative result.
+ * This function is protected against buffer overflow exploits,
+ * including malicious data packets. It never writes outside output buffer,
+ * nor reads outside input buffer.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
+ int maxDecompressedSize);
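A matching decompression sketch for untrusted input (editor's example, not part of the patch; names and error code are invented):

static int example_lz4_decompress(const char *comp, int comp_len,
				  char *out, int out_capacity)
{
	int n = LZ4_decompress_safe(comp, out, comp_len, out_capacity);

	return n < 0 ? -EINVAL : n;	/* negative return signals corrupt input */
}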
+
+/**
+ * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize'
+ * at position 'source' into buffer 'dest'
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the decompressed data which must be
+ * already allocated
+ * @compressedSize: is the precise full size of the compressed block.
+ * @targetOutputSize: the decompression operation will try
+ * to stop as soon as 'targetOutputSize' has been reached
+ * @maxDecompressedSize: is the size of destination buffer
+ *
+ * This function decompresses a compressed block of size 'compressedSize'
+ * at position 'source' into destination buffer 'dest'
+ * of size 'maxDecompressedSize'.
+ * The function tries to stop the decompression operation as soon as
+ * 'targetOutputSize' has been reached, reducing decompression time.
+ * This function never writes outside of output buffer,
+ * and never reads outside of input buffer.
+ * It is therefore protected against malicious data packets.
+ *
+ * Return: the number of bytes decoded in the destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ *
+ */
+int LZ4_decompress_safe_partial(const char *source, char *dest,
+ int compressedSize, int targetOutputSize, int maxDecompressedSize);
+
+/*-************************************************************************
+ * LZ4 HC Compression
+ **************************************************************************/
+
+/**
+ * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @dstCapacity: full or partial size of buffer 'dst',
+ * which must be already allocated
+ * @compressionLevel: Recommended values are between 4 and 9, although any
+ * value between 1 and LZ4HC_MAX_CLEVEL will work.
+ * Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS.
+ *
+ * Compress data from 'src' into 'dst', using the more powerful
+ * but slower "HC" algorithm. Compression is guaranteed to succeed if
+ * `dstCapacity >= LZ4_compressBound(srcSize)
+ *
+ * Return : the number of bytes written into 'dst' or 0 if compression fails.
+ */
+int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity,
+ int compressionLevel, void *wrkmem);
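A one-shot HC sketch along the same lines as the fast compressor above (editor's example, not part of the patch; the vmalloc() pattern and wrapper name are assumptions):

static int example_lz4hc_compress(const char *src, int src_len,
				  char *dst, int dst_capacity)
{
	void *wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);	/* HC needs the larger work area */
	int n;

	if (!wrkmem)
		return -ENOMEM;
	n = LZ4_compress_HC(src, dst, src_len, dst_capacity,
			    LZ4HC_DEFAULT_CLEVEL, wrkmem);
	vfree(wrkmem);
	return n ? n : -EINVAL;
}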
+
+/**
+ * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure
+ * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure
+ * @compressionLevel: Recommended values are between 4 and 9, although any
+ * value between 1 and LZ4HC_MAX_CLEVEL will work.
+ * Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+ *
+ * An LZ4_streamHC_t structure can be allocated once
+ * and re-used multiple times.
+ * Use this function to init an allocated `LZ4_streamHC_t` structure
+ * and start a new compression.
+ */
+void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel);
+
+/**
+ * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC
+ * @streamHCPtr: pointer to the LZ4HC_stream_t
+ * @dictionary: dictionary to load
+ * @dictSize: size of dictionary
+ *
+ * Use this function to load a static dictionary into LZ4HC_stream.
+ * Any previous data will be forgotten, only 'dictionary'
+ * will remain in memory.
+ * Loading a size of 0 is allowed.
+ *
+ * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ */
+int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary,
+ int dictSize);
+
+/**
+ * LZ4_compress_HC_continue() - Compress 'src' using data from previously
+ * compressed blocks as a dictionary using the HC algorithm
+ * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data,
+ * which must be already allocated
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxDstSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ *
+ * These functions compress data in successive blocks of any size, using
+ * previous blocks as dictionary. One key assumption is that previous
+ * blocks (up to 64 KB) remain read-accessible while
+ * compressing next blocks. There is an exception for ring buffers,
+ * which can be smaller than 64 KB.
+ * Ring buffers scenario is automatically detected and handled by
+ * LZ4_compress_HC_continue().
+ * Before starting compression, state must be properly initialized,
+ * using LZ4_resetStreamHC().
+ * A first "fictional block" can then be designated as
+ * initial dictionary, using LZ4_loadDictHC() (Optional).
+ * Then, use LZ4_compress_HC_continue()
+ * to compress each successive block. Previous memory blocks
+ * (including initial dictionary when present) must remain accessible
+ * and unmodified during compression.
+ * 'dst' buffer should be sized to handle worst case scenarios, using
+ * LZ4_compressBound(), to ensure operation success.
+ * If, for any reason, previous data blocks can't be preserved unmodified
+ * in memory during the next compression block,
+ * you must save them to a safer memory space, using LZ4_saveDictHC().
+ * Return value of LZ4_saveDictHC() is the size of dictionary
+ * effectively saved into 'safeBuffer'.
+ *
+ * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
+ */
+int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src,
+ char *dst, int srcSize, int maxDstSize);
+
+/**
+ * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream
+ * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure
+ * @safeBuffer: buffer to save dictionary to, must be already allocated
+ * @maxDictSize: size of 'safeBuffer'
+ *
+ * If previously compressed data block is not guaranteed
+ * to remain available at its memory location,
+ * save it into a safer place (char *safeBuffer).
+ * Note : you don't need to call LZ4_loadDictHC() afterwards,
+ * dictionary is immediately usable, you can therefore call
+ * LZ4_compress_HC_continue().
+ *
+ * Return : saved dictionary size in bytes (necessarily <= maxDictSize),
+ * or 0 if error.
+ */
+int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer,
+ int maxDictSize);
+
+/*-*********************************************
+ * Streaming Compression Functions
+ ***********************************************/
+
+/**
+ * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure
+ * @LZ4_stream: pointer to the 'LZ4_stream_t' structure
+ *
+ * An LZ4_stream_t structure can be allocated once
+ * and re-used multiple times.
+ * Use this function to init an allocated `LZ4_stream_t` structure
+ * and start a new compression.
+ */
+void LZ4_resetStream(LZ4_stream_t *LZ4_stream);
+
+/**
+ * LZ4_loadDict() - Load a static dictionary into LZ4_stream
+ * @streamPtr: pointer to the LZ4_stream_t
+ * @dictionary: dictionary to load
+ * @dictSize: size of dictionary
+ *
+ * Use this function to load a static dictionary into LZ4_stream.
+ * Any previous data will be forgotten, only 'dictionary'
+ * will remain in memory.
+ * Loading a size of 0 is allowed.
+ *
+ * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ */
+int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary,
+ int dictSize);
+
+/**
+ * LZ4_saveDict() - Save static dictionary from LZ4_stream
+ * @streamPtr: pointer to the 'LZ4_stream_t' structure
+ * @safeBuffer: buffer to save dictionary to, must be already allocated
+ * @dictSize: size of 'safeBuffer'
+ *
+ * If previously compressed data block is not guaranteed
+ * to remain available at its memory location,
+ * save it into a safer place (char *safeBuffer).
+ * Note : you don't need to call LZ4_loadDict() afterwards,
+ * dictionary is immediately usable, you can therefore call
+ * LZ4_compress_fast_continue().
+ *
+ * Return : saved dictionary size in bytes (necessarily <= dictSize),
+ * or 0 if error.
+ */
+int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize);
+
+/**
+ * LZ4_compress_fast_continue() - Compress 'src' using data from previously
+ * compressed blocks as a dictionary
+ * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data,
+ * which must be already allocated
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxDstSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @acceleration: acceleration factor
+ *
+ * Compress buffer content 'src', using data from previously compressed blocks
+ * as dictionary to improve compression ratio.
+ * Important : Previous data blocks are assumed to still
+ * be present and unmodified !
+ * If maxDstSize >= LZ4_compressBound(srcSize),
+ * compression is guaranteed to succeed, and runs faster.
+ *
+ * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
+ */
+int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src,
+ char *dst, int srcSize, int maxDstSize, int acceleration);
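A streaming sketch built on the declarations above (editor's example, not part of the patch; the fixed block layout, names and error code are illustrative only, and a real user would also record each block's compressed size):

static int example_lz4_stream_compress(LZ4_stream_t *stream, const char *src,
				       int nblocks, int blk_size,
				       char *dst, int dst_cap)
{
	int i, total = 0;

	LZ4_resetStream(stream);
	for (i = 0; i < nblocks; i++) {
		/* earlier input blocks stay resident in 'src', as required */
		int n = LZ4_compress_fast_continue(stream, src + i * blk_size,
						   dst + total, blk_size,
						   dst_cap - total,
						   LZ4_ACCELERATION_DEFAULT);
		if (!n)
			return -ENOSPC;
		total += n;
	}
	return total;
}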
+
+/**
+ * LZ4_setStreamDecode() - Instruct where to find dictionary
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @dictionary: dictionary to use
+ * @dictSize: size of dictionary
+ *
+ * Use this function to instruct where to find the dictionary.
+ * Setting a size of 0 is allowed (same effect as reset).
+ *
+ * Return: 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize);
+
+/**
+ * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB)
+ * In the case of ring buffers, the decoding buffer must be either :
+ * - Exactly same size as encoding buffer, with same update rule
+ * (block boundaries at same positions) In which case,
+ * the decoding & encoding ring buffer can have any size,
+ * including very small ones ( < 64 KB).
+ * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * maxBlockSize is implementation dependent.
+ * It's the maximum size you intend to compress into a single block.
+ * In which case, encoding and decoding buffers do not need
+ * to be synchronized, and encoding ring buffer can have any size,
+ * including small ones ( < 64 KB).
+ * - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ * In which case, encoding and decoding buffers do not need to be
+ * synchronized, and encoding ring buffer can have any size,
+ * including larger than decoding buffer.
+ * Whenever these conditions are not possible, save the last 64KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode()
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int compressedSize,
+ int maxDecompressedSize);
+
+/**
+ * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB)
+ * In the case of ring buffers, the decoding buffer must be either :
+ * - Exactly same size as encoding buffer, with same update rule
+ * (block boundaries at same positions) In which case,
+ * the decoding & encoding ring buffer can have any size,
+ * including very small ones ( < 64 KB).
+ * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * maxBlockSize is implementation dependent.
+ * It's the maximum size you intend to compress into a single block.
+ * In which case, encoding and decoding buffers do not need
+ * to be synchronized, and encoding ring buffer can have any size,
+ * including small ones ( < 64 KB).
+ * - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ * In which case, encoding and decoding buffers do not need to be
+ * synchronized, and encoding ring buffer can have any size,
+ * including larger than decoding buffer.
+ * Whenever these conditions are not possible, save the last 64KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode()
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int originalSize);
+
+/**
+ * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode()
+ * followed by LZ4_decompress_safe_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_safe_continue().
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxDecompressedSize, const char *dictStart,
+ int dictSize);
+
+/**
+ * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode()
+ * followed by LZ4_decompress_fast_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_fast_continue().
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_fast_usingDict(const char *source, char *dest,
+ int originalSize, const char *dictStart, int dictSize);
+
#endif
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 38bcf00cbed3..bdfc65af4152 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -42,6 +42,7 @@ struct memblock_type {
unsigned long max; /* size of the allocated array */
phys_addr_t total_size; /* size of all regions */
struct memblock_region *regions;
+ char *name;
};
struct memblock {
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 093607f90b91..b723a686fc10 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -109,9 +109,6 @@ extern void unregister_memory_notifier(struct notifier_block *nb);
extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
extern int register_new_memory(int, struct mem_section *);
-extern int memory_block_change_state(struct memory_block *mem,
- unsigned long to_state,
- unsigned long from_state_req);
#ifdef CONFIG_MEMORY_HOTREMOVE
extern int unregister_memory_section(struct mem_section *);
#endif
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 6483a6fdce59..ffb21e79204d 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -134,6 +134,7 @@
/* RTC_CTRL_REG bitfields */
#define TPS65910_RTC_CTRL_STOP_RTC 0x01 /*0=stop, 1=run */
+#define TPS65910_RTC_CTRL_AUTO_COMP 0x04
#define TPS65910_RTC_CTRL_GET_TIME 0x40
/* RTC_STATUS_REG bitfields */
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
index 27d7c95fd0da..504d54c71bdb 100644
--- a/include/linux/mic_bus.h
+++ b/include/linux/mic_bus.h
@@ -90,7 +90,7 @@ struct mbus_hw_ops {
};
struct mbus_device *
-mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
struct mbus_hw_ops *hw_ops, int index,
void __iomem *mmio_va);
void mbus_unregister_device(struct mbus_device *mbdev);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index ae8d475a9385..fa76b516fa47 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -37,7 +37,7 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
-extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);
extern int migrate_prep(void);
@@ -56,6 +56,8 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
free_page_t free, unsigned long private, enum migrate_mode mode,
int reason)
{ return -ENOSYS; }
+static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
+ { return -EBUSY; }
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index e965e5090d96..a858bcb6220b 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -109,7 +109,7 @@ static inline void mlx4_u64_to_mac(u8 *addr, u64 mac)
int i;
for (i = ETH_ALEN; i > 0; i--) {
- addr[i - 1] = mac && 0xFF;
+ addr[i - 1] = mac & 0xFF;
mac >>= 8;
}
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 574bc157a27c..0d65dd72c0f4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -314,6 +314,9 @@ struct vm_fault {
unsigned long address; /* Faulting virtual address */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address' */
+ pud_t *pud; /* Pointer to pud entry matching
+ * the 'address'
+ */
pte_t orig_pte; /* Value of PTE at the time of fault */
struct page *cow_page; /* Page handler may use for COW fault */
@@ -341,6 +344,13 @@ struct vm_fault {
*/
};
+/* page entry size for vm->huge_fault() */
+enum page_entry_size {
+ PE_SIZE_PTE = 0,
+ PE_SIZE_PMD,
+ PE_SIZE_PUD,
+};
+
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -350,17 +360,17 @@ struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*mremap)(struct vm_area_struct * area);
- int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
- int (*pmd_fault)(struct vm_fault *vmf);
+ int (*fault)(struct vm_fault *vmf);
+ int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
- int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int (*page_mkwrite)(struct vm_fault *vmf);
/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
- int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
@@ -416,6 +426,10 @@ static inline int pmd_devmap(pmd_t pmd)
{
return 0;
}
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
#endif
/*
@@ -1154,16 +1168,6 @@ extern void pagefault_out_of_memory(void);
extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
-int shmem_zero_setup(struct vm_area_struct *);
-#ifdef CONFIG_SHMEM
-bool shmem_mapping(struct address_space *mapping);
-#else
-static inline bool shmem_mapping(struct address_space *mapping)
-{
- return false;
-}
-#endif
-
extern bool can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);
@@ -1191,6 +1195,10 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
/**
* mm_walk - callbacks for walk_page_range
+ * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
+ * this handler should only handle pud_trans_huge() puds.
+ * the pmd_entry or pte_entry callbacks will be used for
+ * regular PUDs.
* @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
@@ -1210,6 +1218,8 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
* (see the comment on walk_page_range() for more details)
*/
struct mm_walk {
+ int (*pud_entry)(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk);
int (*pte_entry)(pte_t *pte, unsigned long addr,
@@ -1793,8 +1803,26 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
-extern void __init pagecache_init(void);
+/*
+ * No scalability reason to split PUD locks yet, but follow the same pattern
+ * as the PMD locks to make it easier if we decide to. The VM should not be
+ * considered ready to switch to split PUD locks yet; there may be places
+ * which need to be converted from page_table_lock.
+ */
+static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
+{
+ return &mm->page_table_lock;
+}
+static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
+{
+ spinlock_t *ptl = pud_lockptr(mm, pud);
+
+ spin_lock(ptl);
+ return ptl;
+}
+
+extern void __init pagecache_init(void);
extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
@@ -2003,8 +2031,10 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
struct mempolicy *, struct vm_userfaultfd_ctx);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
-extern int split_vma(struct mm_struct *,
- struct vm_area_struct *, unsigned long addr, int new_below);
+extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
+ unsigned long addr, int new_below);
+extern int split_vma(struct mm_struct *, struct vm_area_struct *,
+ unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
struct rb_node **, struct rb_node *);
@@ -2052,18 +2082,22 @@ extern int install_special_mapping(struct mm_struct *mm,
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
+ unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+ struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate);
-extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf);
+extern int do_munmap(struct mm_struct *, unsigned long, size_t,
+ struct list_head *uf);
static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate)
+ unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf)
{
- return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
+ return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
}
#ifdef CONFIG_MMU
@@ -2124,10 +2158,10 @@ extern void truncate_inode_pages_range(struct address_space *,
extern void truncate_inode_pages_final(struct address_space *);
/* generic vm_area_ops exported for stackable file systems */
-extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+extern int filemap_fault(struct vm_fault *vmf);
extern void filemap_map_pages(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
-extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int filemap_page_mkwrite(struct vm_fault *vmf);
/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 41d376e7116d..e030a68ead7e 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -50,6 +50,13 @@ static __always_inline void add_page_to_lru_list(struct page *page,
list_add(&page->lru, &lruvec->lists[lru]);
}
+static __always_inline void add_page_to_lru_list_tail(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
+{
+ update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
+ list_add_tail(&page->lru, &lruvec->lists[lru]);
+}
+
static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 808751d7b737..4f6d440ad785 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -407,8 +407,27 @@ struct mm_struct {
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
- atomic_t mm_users; /* How many users with user space? */
- atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
+
+ /**
+ * @mm_users: The number of users including userspace.
+ *
+ * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
+ * to 0 (i.e. when the task exits and there are no other temporary
+ * reference holders), we also release a reference on @mm_count
+ * (which may then free the &struct mm_struct if @mm_count also
+ * drops to 0).
+ */
+ atomic_t mm_users;
+
+ /**
+ * @mm_count: The number of references to &struct mm_struct
+ * (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
+ * &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
+
atomic_long_t nr_ptes; /* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
atomic_long_t nr_pmds; /* PMD page table pages */
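A short sketch of the intended pairing of the two counters (editor's example, not part of the patch; it relies only on the helpers named in the new comment):

static void example_mm_refs(struct mm_struct *mm)
{
	mmgrab(mm);			/* keep the struct mm_struct allocation alive */
	if (mmget_not_zero(mm)) {	/* additionally pin the address space */
		/* ... safe to walk the userspace mappings here ... */
		mmput(mm);
	}
	mmdrop(mm);			/* may free the struct if we held the last reference */
}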
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index a1a210d59961..51891fb0d3ce 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -381,6 +381,19 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
___pmd; \
})
+#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pud_t ___pud; \
+ \
+ ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
+ mmu_notifier_invalidate_range(___mm, ___haddr, \
+ ___haddr + HPAGE_PUD_SIZE); \
+ \
+ ___pud; \
+})
+
#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
({ \
unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
@@ -475,6 +488,7 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
+#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
#define set_pte_at_notify set_pte_at
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 82fc632fd11d..8e02b3750fe0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -236,8 +236,6 @@ struct lruvec {
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-/* Isolate clean file */
-#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index 7b3d487d8b3f..b532ce524dae 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -14,7 +14,7 @@
* @DevId - Chip Device ID
* @qinfo - pointer to qinfo records describing the chip
* @numchips - number of chips including virual RWW partitions
- * @chipshift - Chip/partiton size 2^chipshift
+ * @chipshift - Chip/partition size 2^chipshift
* @chips - per-chip data structure
*/
struct lpddr_private {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f40f0ab3847a..97456b2539e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -330,6 +330,7 @@ struct napi_struct {
enum {
NAPI_STATE_SCHED, /* Poll is scheduled */
+ NAPI_STATE_MISSED, /* reschedule a napi */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
@@ -338,12 +339,13 @@ enum {
};
enum {
- NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED),
- NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE),
- NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC),
- NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED),
- NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
- NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
+ NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED),
+ NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
+ NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
+ NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
+ NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
+ NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
+ NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};
enum gro_result {
@@ -414,20 +416,7 @@ static inline bool napi_disable_pending(struct napi_struct *n)
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
-/**
- * napi_schedule_prep - check if NAPI can be scheduled
- * @n: NAPI context
- *
- * Test if NAPI routine is already running, and if not mark
- * it as running. This is used as a condition variable to
- * insure only one NAPI poll instance runs. We also make
- * sure there is no pending NAPI disable.
- */
-static inline bool napi_schedule_prep(struct napi_struct *n)
-{
- return !napi_disable_pending(n) &&
- !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
-}
+bool napi_schedule_prep(struct napi_struct *n);
/**
* napi_schedule - schedule NAPI poll
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index bf240a3cbf99..a72fd04aa5e1 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -29,6 +29,30 @@ enum nvme_rdma_cm_status {
NVME_RDMA_CM_INVALID_ORD = 0x08,
};
+static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status)
+{
+ switch (status) {
+ case NVME_RDMA_CM_INVALID_LEN:
+ return "invalid length";
+ case NVME_RDMA_CM_INVALID_RECFMT:
+ return "invalid record format";
+ case NVME_RDMA_CM_INVALID_QID:
+ return "invalid queue ID";
+ case NVME_RDMA_CM_INVALID_HSQSIZE:
+ return "invalid host SQ size";
+ case NVME_RDMA_CM_INVALID_HRQSIZE:
+ return "invalid host RQ size";
+ case NVME_RDMA_CM_NO_RSC:
+ return "resource not found";
+ case NVME_RDMA_CM_INVALID_IRD:
+ return "invalid IRD";
+ case NVME_RDMA_CM_INVALID_ORD:
+ return "Invalid ORD";
+ default:
+ return "unrecognized reason";
+ }
+}
+
/**
* struct nvme_rdma_cm_req - rdma connect request
*
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 0b676a02cf3e..c43d435d4225 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -579,6 +579,12 @@ struct nvme_write_zeroes_cmd {
__le16 appmask;
};
+/* Features */
+
+struct nvme_feat_auto_pst {
+ __le64 entries[32];
+};
+
/* Admin commands */
enum nvme_admin_opcode {
@@ -644,7 +650,9 @@ struct nvme_identify {
__le32 nsid;
__u64 rsvd2[2];
union nvme_data_ptr dptr;
- __le32 cns;
+ __u8 cns;
+ __u8 rsvd3;
+ __le16 ctrlid;
__u32 rsvd11[5];
};
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 033fc7bbcefa..a49b3259cad7 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -90,6 +90,13 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
{
return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
}
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
+{
+ return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
+}
+#endif
#endif
#ifdef __HAVE_ARCH_PTE_DEVMAP
@@ -106,5 +113,10 @@ static inline bool pfn_t_devmap(pfn_t pfn)
}
pte_t pte_mkdevmap(pte_t pte);
pmd_t pmd_mkdevmap(pmd_t pmd);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+pud_t pud_mkdevmap(pud_t pud);
#endif
+#endif /* __HAVE_ARCH_PTE_DEVMAP */
+
#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 23705a53abba..298ead5512e5 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -191,10 +191,10 @@ pid_t pid_vnr(struct pid *pid);
#define do_each_pid_thread(pid, type, task) \
do_each_pid_task(pid, type, task) { \
struct task_struct *tg___ = task; \
- do {
+ for_each_thread(tg___, task) {
#define while_each_pid_thread(pid, type, task) \
- } while_each_thread(tg___, task); \
+ } \
task = tg___; \
} while_each_pid_task(pid, type, task)
#endif /* _LINUX_PID_H */
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 34cce96741bc..c2a989dee876 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -21,6 +21,12 @@ struct pidmap {
struct fs_pin;
+enum { /* definitions for pid_namespace's hide_pid field */
+ HIDEPID_OFF = 0,
+ HIDEPID_NO_ACCESS = 1,
+ HIDEPID_INVISIBLE = 2,
+};
+
struct pid_namespace {
struct kref kref;
struct pidmap pidmap[PIDMAP_ENTRIES];
diff --git a/include/linux/platform_data/rtc-m48t86.h b/include/linux/platform_data/rtc-m48t86.h
deleted file mode 100644
index 915d6b4f0f89..000000000000
--- a/include/linux/platform_data/rtc-m48t86.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * ST M48T86 / Dallas DS12887 RTC driver
- * Copyright (c) 2006 Tower Technologies
- *
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-struct m48t86_ops
-{
- void (*writebyte)(unsigned char value, unsigned long addr);
- unsigned char (*readbyte)(unsigned long addr);
-};
diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h
index 18e908324549..a5c0a71ec914 100644
--- a/include/linux/platform_data/video-imxfb.h
+++ b/include/linux/platform_data/video-imxfb.h
@@ -47,10 +47,6 @@
#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
#define LSCR1_GRAY1(x) (((x) & 0xf))
-#define DMACR_BURST (1 << 31)
-#define DMACR_HM(x) (((x) & 0xf) << 16)
-#define DMACR_TM(x) ((x) & 0xf)
-
struct imx_fb_videomode {
struct fb_videomode mode;
u32 pcr;
diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h
new file mode 100644
index 000000000000..3ab892208343
--- /dev/null
+++ b/include/linux/platform_data/x86/clk-pmc-atom.h
@@ -0,0 +1,44 @@
+/*
+ * Intel Atom platform clocks for BayTrail and CherryTrail SoC.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Irina Tirdea <irina.tirdea@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PLATFORM_DATA_X86_CLK_PMC_ATOM_H
+#define __PLATFORM_DATA_X86_CLK_PMC_ATOM_H
+
+/**
+ * struct pmc_clk - PMC platform clock configuration
+ *
+ * @name: identifier, typically pmc_plt_clk_<x>, x=[0..5]
+ * @freq: in Hz, 19.2MHz and 25MHz (Baytrail only) supported
+ * @parent_name: one of 'xtal' or 'osc'
+ */
+struct pmc_clk {
+ const char *name;
+ unsigned long freq;
+ const char *parent_name;
+};
+
+/**
+ * struct pmc_clk_data - common PMC clock configuration
+ *
+ * @base: PMC clock register base offset
+ * @clks: pointer to set of registered clocks, typically 0..5
+ */
+struct pmc_clk_data {
+ void __iomem *base;
+ const struct pmc_clk *clks;
+};
+
+#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
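A hypothetical clock table a platform driver might hand over using these structures (editor's sketch, not part of the patch; the frequencies, names and the empty terminator are illustrative only):

static const struct pmc_clk example_byt_clks[] = {
	{ .name = "pmc_plt_clk_0", .freq = 25000000, .parent_name = "osc"  },
	{ .name = "pmc_plt_clk_1", .freq = 19200000, .parent_name = "xtal" },
	{ }					/* assumed sentinel */
};

static struct pmc_clk_data example_clk_data = {
	.base = NULL,				/* PMC MMIO base, mapped at probe time */
	.clks = example_byt_clks,
};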
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
new file mode 100644
index 000000000000..e4905fe69c38
--- /dev/null
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -0,0 +1,158 @@
+/*
+ * Intel Atom SOC Power Management Controller Header File
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef PMC_ATOM_H
+#define PMC_ATOM_H
+
+/* ValleyView Power Control Unit PCI Device ID */
+#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
+/* CherryTrail Power Control Unit PCI Device ID */
+#define PCI_DEVICE_ID_CHT_PMC 0x229C
+
+/* PMC Memory mapped IO registers */
+#define PMC_BASE_ADDR_OFFSET 0x44
+#define PMC_BASE_ADDR_MASK 0xFFFFFE00
+#define PMC_MMIO_REG_LEN 0x100
+#define PMC_REG_BIT_WIDTH 32
+
+/* BIOS uses FUNC_DIS to disable specific function */
+#define PMC_FUNC_DIS 0x34
+#define PMC_FUNC_DIS_2 0x38
+
+/* CHT specific bits in FUNC_DIS2 register */
+#define BIT_FD_GMM BIT(3)
+#define BIT_FD_ISH BIT(4)
+
+/* S0ix wake event control */
+#define PMC_S0IX_WAKE_EN 0x3C
+
+#define BIT_LPC_CLOCK_RUN BIT(4)
+#define BIT_SHARED_IRQ_GPSC BIT(5)
+#define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18)
+#define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19)
+#define BIT_SHARED_IRQ_GPSS BIT(20)
+
+#define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \
+ BIT_SHARED_IRQ_GPSC | \
+ BIT_ORED_DEDICATED_IRQ_GPSS | \
+ BIT_ORED_DEDICATED_IRQ_GPSC | \
+ BIT_SHARED_IRQ_GPSS)
+
+/* The timers accumulate time spent in sleep state */
+#define PMC_S0IR_TMR 0x80
+#define PMC_S0I1_TMR 0x84
+#define PMC_S0I2_TMR 0x88
+#define PMC_S0I3_TMR 0x8C
+#define PMC_S0_TMR 0x90
+/* Sleep state counter is in units of 32us */
+#define PMC_TMR_SHIFT 5
+
+/* Power status of power islands */
+#define PMC_PSS 0x98
+
+#define PMC_PSS_BIT_GBE BIT(0)
+#define PMC_PSS_BIT_SATA BIT(1)
+#define PMC_PSS_BIT_HDA BIT(2)
+#define PMC_PSS_BIT_SEC BIT(3)
+#define PMC_PSS_BIT_PCIE BIT(4)
+#define PMC_PSS_BIT_LPSS BIT(5)
+#define PMC_PSS_BIT_LPE BIT(6)
+#define PMC_PSS_BIT_DFX BIT(7)
+#define PMC_PSS_BIT_USH_CTRL BIT(8)
+#define PMC_PSS_BIT_USH_SUS BIT(9)
+#define PMC_PSS_BIT_USH_VCCS BIT(10)
+#define PMC_PSS_BIT_USH_VCCA BIT(11)
+#define PMC_PSS_BIT_OTG_CTRL BIT(12)
+#define PMC_PSS_BIT_OTG_VCCS BIT(13)
+#define PMC_PSS_BIT_OTG_VCCA_CLK BIT(14)
+#define PMC_PSS_BIT_OTG_VCCA BIT(15)
+#define PMC_PSS_BIT_USB BIT(16)
+#define PMC_PSS_BIT_USB_SUS BIT(17)
+
+/* CHT specific bits in PSS register */
+#define PMC_PSS_BIT_CHT_UFS BIT(7)
+#define PMC_PSS_BIT_CHT_UXD BIT(11)
+#define PMC_PSS_BIT_CHT_UXD_FD BIT(12)
+#define PMC_PSS_BIT_CHT_UX_ENG BIT(15)
+#define PMC_PSS_BIT_CHT_USB_SUS BIT(16)
+#define PMC_PSS_BIT_CHT_GMM BIT(17)
+#define PMC_PSS_BIT_CHT_ISH BIT(18)
+#define PMC_PSS_BIT_CHT_DFX_MASTER BIT(26)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER1 BIT(27)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER2 BIT(28)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER3 BIT(29)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER4 BIT(30)
+#define PMC_PSS_BIT_CHT_DFX_CLUSTER5 BIT(31)
+
+/* These registers reflect D3 status of functions */
+#define PMC_D3_STS_0 0xA0
+
+#define BIT_LPSS1_F0_DMA BIT(0)
+#define BIT_LPSS1_F1_PWM1 BIT(1)
+#define BIT_LPSS1_F2_PWM2 BIT(2)
+#define BIT_LPSS1_F3_HSUART1 BIT(3)
+#define BIT_LPSS1_F4_HSUART2 BIT(4)
+#define BIT_LPSS1_F5_SPI BIT(5)
+#define BIT_LPSS1_F6_XXX BIT(6)
+#define BIT_LPSS1_F7_XXX BIT(7)
+#define BIT_SCC_EMMC BIT(8)
+#define BIT_SCC_SDIO BIT(9)
+#define BIT_SCC_SDCARD BIT(10)
+#define BIT_SCC_MIPI BIT(11)
+#define BIT_HDA BIT(12)
+#define BIT_LPE BIT(13)
+#define BIT_OTG BIT(14)
+#define BIT_USH BIT(15)
+#define BIT_GBE BIT(16)
+#define BIT_SATA BIT(17)
+#define BIT_USB_EHCI BIT(18)
+#define BIT_SEC BIT(19)
+#define BIT_PCIE_PORT0 BIT(20)
+#define BIT_PCIE_PORT1 BIT(21)
+#define BIT_PCIE_PORT2 BIT(22)
+#define BIT_PCIE_PORT3 BIT(23)
+#define BIT_LPSS2_F0_DMA BIT(24)
+#define BIT_LPSS2_F1_I2C1 BIT(25)
+#define BIT_LPSS2_F2_I2C2 BIT(26)
+#define BIT_LPSS2_F3_I2C3 BIT(27)
+#define BIT_LPSS2_F4_I2C4 BIT(28)
+#define BIT_LPSS2_F5_I2C5 BIT(29)
+#define BIT_LPSS2_F6_I2C6 BIT(30)
+#define BIT_LPSS2_F7_I2C7 BIT(31)
+
+#define PMC_D3_STS_1 0xA4
+#define BIT_SMB BIT(0)
+#define BIT_OTG_SS_PHY BIT(1)
+#define BIT_USH_SS_PHY BIT(2)
+#define BIT_DFX BIT(3)
+
+/* CHT specific bits in PMC_D3_STS_1 register */
+#define BIT_STS_GMM BIT(1)
+#define BIT_STS_ISH BIT(2)
+
+/* PMC I/O Registers */
+#define ACPI_BASE_ADDR_OFFSET 0x40
+#define ACPI_BASE_ADDR_MASK 0xFFFFFE00
+#define ACPI_MMIO_REG_LEN 0x100
+
+#define PM1_CNT 0x4
+#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_S5 0x1C00
+#define SLEEP_ENABLE 0x2000
+
+extern int pmc_atom_read(int offset, u32 *value);
+extern int pmc_atom_write(int offset, u32 value);
+
+#endif /* PMC_ATOM_H */
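The accessors declared above return raw register values; below is a minimal sketch (not part of the patch) of how a caller might convert an S0ix residency counter to microseconds. The function name is illustrative, and it assumes this header is already included.

static u64 example_s0i3_residency_us(void)
{
	u32 counts;

	/* pmc_atom_read() returns non-zero if the PMC is not available */
	if (pmc_atom_read(PMC_S0I3_TMR, &counts))
		return 0;

	/* the counter ticks in 32 us units, hence the shift by PMC_TMR_SHIFT */
	return (u64)counts << PMC_TMR_SHIFT;
}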
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 52bda854593b..3e5735064b71 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -22,11 +22,13 @@
#define _LINUX_RADIX_TREE_H
#include <linux/bitops.h>
-#include <linux/preempt.h>
-#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/preempt.h>
#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
/*
* The bottom two bits of the slot determine how the remaining bits in the
@@ -94,7 +96,7 @@ struct radix_tree_node {
unsigned char count; /* Total entry count */
unsigned char exceptional; /* Exceptional entry count */
struct radix_tree_node *parent; /* Used when ascending tree */
- void *private_data; /* For tree user */
+ struct radix_tree_root *root; /* The tree we belong to */
union {
struct list_head private_list; /* For tree user */
struct rcu_head rcu_head; /* Used when freeing node */
@@ -103,7 +105,10 @@ struct radix_tree_node {
unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};
-/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
+/* The top bits of gfp_mask are used to store the root tags and the IDR flag */
+#define ROOT_IS_IDR ((__force gfp_t)(1 << __GFP_BITS_SHIFT))
+#define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT + 1)
+
struct radix_tree_root {
gfp_t gfp_mask;
struct radix_tree_node __rcu *rnode;
@@ -123,7 +128,7 @@ do { \
(root)->rnode = NULL; \
} while (0)
-static inline bool radix_tree_empty(struct radix_tree_root *root)
+static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
return root->rnode == NULL;
}
@@ -216,10 +221,8 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
*/
/**
- * radix_tree_deref_slot - dereference a slot
- * @pslot: pointer to slot, returned by radix_tree_lookup_slot
- * Returns: item that was stored in that slot with any direct pointer flag
- * removed.
+ * radix_tree_deref_slot - dereference a slot
+ * @slot: slot pointer, returned by radix_tree_lookup_slot
*
* For use with radix_tree_lookup_slot(). Caller must hold tree at least read
* locked across slot lookup and dereference. Not required if write lock is
@@ -227,26 +230,27 @@ static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
*
* radix_tree_deref_retry must be used to confirm validity of the pointer if
* only the read lock is held.
+ *
+ * Return: entry stored in that slot.
*/
-static inline void *radix_tree_deref_slot(void **pslot)
+static inline void *radix_tree_deref_slot(void __rcu **slot)
{
- return rcu_dereference(*pslot);
+ return rcu_dereference(*slot);
}
/**
- * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
- * @pslot: pointer to slot, returned by radix_tree_lookup_slot
- * Returns: item that was stored in that slot with any direct pointer flag
- * removed.
- *
- * Similar to radix_tree_deref_slot but only used during migration when a pages
- * mapping is being moved. The caller does not hold the RCU read lock but it
- * must hold the tree lock to prevent parallel updates.
+ * radix_tree_deref_slot_protected - dereference a slot with tree lock held
+ * @slot: slot pointer, returned by radix_tree_lookup_slot
+ *
+ * Similar to radix_tree_deref_slot. The caller does not hold the RCU read
+ * lock but it must hold the tree lock to prevent parallel updates.
+ *
+ * Return: entry stored in that slot.
*/
-static inline void *radix_tree_deref_slot_protected(void **pslot,
+static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
spinlock_t *treelock)
{
- return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
+ return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}
/**
@@ -282,9 +286,9 @@ static inline int radix_tree_exception(void *arg)
return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}
-int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
+int __radix_tree_create(struct radix_tree_root *, unsigned long index,
unsigned order, struct radix_tree_node **nodep,
- void ***slotp);
+ void __rcu ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
@@ -292,55 +296,56 @@ static inline int radix_tree_insert(struct radix_tree_root *root,
{
return __radix_tree_insert(root, index, 0, entry);
}
-void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
- struct radix_tree_node **nodep, void ***slotp);
-void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
-void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
+void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
+ struct radix_tree_node **nodep, void __rcu ***slotp);
+void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
+void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
+ unsigned long index);
typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
-void __radix_tree_replace(struct radix_tree_root *root,
- struct radix_tree_node *node,
- void **slot, void *item,
+void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
+ void __rcu **slot, void *entry,
radix_tree_update_node_t update_node, void *private);
void radix_tree_iter_replace(struct radix_tree_root *,
- const struct radix_tree_iter *, void **slot, void *item);
-void radix_tree_replace_slot(struct radix_tree_root *root,
- void **slot, void *item);
-void __radix_tree_delete_node(struct radix_tree_root *root,
- struct radix_tree_node *node,
+ const struct radix_tree_iter *, void __rcu **slot, void *entry);
+void radix_tree_replace_slot(struct radix_tree_root *,
+ void __rcu **slot, void *entry);
+void __radix_tree_delete_node(struct radix_tree_root *,
+ struct radix_tree_node *,
radix_tree_update_node_t update_node,
void *private);
+void radix_tree_iter_delete(struct radix_tree_root *,
+ struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
-void radix_tree_clear_tags(struct radix_tree_root *root,
- struct radix_tree_node *node,
- void **slot);
-unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
+void radix_tree_clear_tags(struct radix_tree_root *, struct radix_tree_node *,
+ void __rcu **slot);
+unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
void **results, unsigned long first_index,
unsigned int max_items);
-unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
- void ***results, unsigned long *indices,
+unsigned int radix_tree_gang_lookup_slot(const struct radix_tree_root *,
+ void __rcu ***results, unsigned long *indices,
unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
-void *radix_tree_tag_set(struct radix_tree_root *root,
+void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
-void *radix_tree_tag_clear(struct radix_tree_root *root,
+void *radix_tree_tag_clear(struct radix_tree_root *,
unsigned long index, unsigned int tag);
-int radix_tree_tag_get(struct radix_tree_root *root,
+int radix_tree_tag_get(const struct radix_tree_root *,
unsigned long index, unsigned int tag);
-void radix_tree_iter_tag_set(struct radix_tree_root *root,
+void radix_tree_iter_tag_set(struct radix_tree_root *,
+ const struct radix_tree_iter *iter, unsigned int tag);
+void radix_tree_iter_tag_clear(struct radix_tree_root *,
const struct radix_tree_iter *iter, unsigned int tag);
-unsigned int
-radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
- unsigned long first_index, unsigned int max_items,
- unsigned int tag);
-unsigned int
-radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
- unsigned long first_index, unsigned int max_items,
- unsigned int tag);
-int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
+unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
+ void **results, unsigned long first_index,
+ unsigned int max_items, unsigned int tag);
+unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
+ void __rcu ***results, unsigned long first_index,
+ unsigned int max_items, unsigned int tag);
+int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
static inline void radix_tree_preload_end(void)
{
@@ -352,10 +357,14 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
+void __rcu **idr_get_free(struct radix_tree_root *, struct radix_tree_iter *,
+ gfp_t, int end);
-#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */
-#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */
-#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */
+enum {
+ RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
+ RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */
+ RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */
+};
/**
* radix_tree_iter_init - initialize radix tree iterator
@@ -364,7 +373,7 @@ int radix_tree_join(struct radix_tree_root *, unsigned long index,
* @start: iteration starting index
* Returns: NULL
*/
-static __always_inline void **
+static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
/*
@@ -393,10 +402,46 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
* Also it fills @iter with data about chunk: position in the tree (index),
* its end (next_index), and constructs a bit mask for tagged iterating (tags).
*/
-void **radix_tree_next_chunk(struct radix_tree_root *root,
+void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
struct radix_tree_iter *iter, unsigned flags);
/**
+ * radix_tree_iter_lookup - look up an index in the radix tree
+ * @root: radix tree root
+ * @iter: iterator state
+ * @index: key to look up
+ *
+ * If @index is present in the radix tree, this function returns the slot
+ * containing it and updates @iter to describe the entry. If @index is not
+ * present, it returns NULL.
+ */
+static inline void __rcu **
+radix_tree_iter_lookup(const struct radix_tree_root *root,
+ struct radix_tree_iter *iter, unsigned long index)
+{
+ radix_tree_iter_init(iter, index);
+ return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
+}
+
+/**
+ * radix_tree_iter_find - find a present entry
+ * @root: radix tree root
+ * @iter: iterator state
+ * @index: start location
+ *
+ * This function returns the slot containing the entry with the lowest index
+ * which is at least @index. If @index is larger than any present entry, this
+ * function returns NULL. The @iter is updated to describe the entry found.
+ */
+static inline void __rcu **
+radix_tree_iter_find(const struct radix_tree_root *root,
+ struct radix_tree_iter *iter, unsigned long index)
+{
+ radix_tree_iter_init(iter, index);
+ return radix_tree_next_chunk(root, iter, 0);
+}
+
+/**
* radix_tree_iter_retry - retry this chunk of the iteration
* @iter: iterator state
*
@@ -406,7 +451,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
* and continue the iteration.
*/
static inline __must_check
-void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
iter->next_index = iter->index;
iter->tags = 0;
@@ -429,7 +474,7 @@ __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
* have been invalidated by an insertion or deletion. Call this function
* before releasing the lock to continue the iteration from the next index.
*/
-void **__must_check radix_tree_iter_resume(void **slot,
+void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
struct radix_tree_iter *iter);
/**
@@ -445,11 +490,11 @@ radix_tree_chunk_size(struct radix_tree_iter *iter)
}
#ifdef CONFIG_RADIX_TREE_MULTIORDER
-void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
- unsigned flags);
+void __rcu **__radix_tree_next_slot(void __rcu **slot,
+ struct radix_tree_iter *iter, unsigned flags);
#else
/* Can't happen without sibling entries, but the compiler can't tell that */
-static inline void ** __radix_tree_next_slot(void **slot,
+static inline void __rcu **__radix_tree_next_slot(void __rcu **slot,
struct radix_tree_iter *iter, unsigned flags)
{
return slot;
@@ -475,8 +520,8 @@ static inline void ** __radix_tree_next_slot(void **slot,
* b) we are doing non-tagged iteration, and iter->index and iter->next_index
* have been set up so that radix_tree_chunk_size() returns 1 or 0.
*/
-static __always_inline void **
-radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
+static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
+ struct radix_tree_iter *iter, unsigned flags)
{
if (flags & RADIX_TREE_ITER_TAGGED) {
iter->tags >>= 1;
@@ -514,7 +559,7 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
return NULL;
found:
- if (unlikely(radix_tree_is_internal_node(*slot)))
+ if (unlikely(radix_tree_is_internal_node(rcu_dereference_raw(*slot))))
return __radix_tree_next_slot(slot, iter, flags);
return slot;
}
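A minimal usage sketch (not part of the patch) for the new radix_tree_iter_find() helper; the function and variable names are illustrative only, and the caller is assumed to be fine with an RCU-protected lookup.

static void *example_first_entry_from(const struct radix_tree_root *root,
				      unsigned long start)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	void *entry = NULL;

	rcu_read_lock();
	slot = radix_tree_iter_find(root, &iter, start);
	if (slot)
		entry = radix_tree_deref_slot(slot);
	rcu_read_unlock();

	/* entry may already be stale once the RCU read lock is dropped */
	return entry;
}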
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index d076183e49be..9702b6e183bc 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -90,7 +90,9 @@ rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
old->rbaugmented = rbcompute(old); \
} \
rbstatic const struct rb_augment_callbacks rbname = { \
- rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
+ .propagate = rbname ## _propagate, \
+ .copy = rbname ## _copy, \
+ .rotate = rbname ## _rotate \
};
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 600aadf9cca4..0023fee4bbbc 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -1,54 +1,10 @@
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H
-/*
- * Variant of atomic_t specialized for reference counts.
- *
- * The interface matches the atomic_t interface (to aid in porting) but only
- * provides the few functions one should use for reference counting.
- *
- * It differs in that the counter saturates at UINT_MAX and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free issues.
- *
- * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
- * and provide only what is strictly required for refcounts.
- *
- * The increments are fully relaxed; these will not provide ordering. The
- * rationale is that whatever is used to obtain the object we're increasing the
- * reference count on will provide the ordering. For locked data structures,
- * its the lock acquire, for RCU/lockless data structures its the dependent
- * load.
- *
- * Do note that inc_not_zero() provides a control dependency which will order
- * future stores against the inc, this ensures we'll never modify the object
- * if we did not in fact acquire a reference.
- *
- * The decrements will provide release order, such that all the prior loads and
- * stores will be issued before, it also provides a control dependency, which
- * will order us against the subsequent free().
- *
- * The control dependency is against the load of the cmpxchg (ll/sc) that
- * succeeded. This means the stores aren't fully ordered, but this is fine
- * because the 1->0 transition indicates no concurrency.
- *
- * Note that the allocator is responsible for ordering things between free()
- * and alloc().
- *
- */
-
#include <linux/atomic.h>
-#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
-
-#ifdef CONFIG_DEBUG_REFCOUNT
-#define REFCOUNT_WARN(cond, str) WARN_ON(cond)
-#define __refcount_check __must_check
-#else
-#define REFCOUNT_WARN(cond, str) (void)(cond)
-#define __refcount_check
-#endif
+#include <linux/kernel.h>
typedef struct refcount_struct {
atomic_t refs;
@@ -66,229 +22,21 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
-static inline __refcount_check
-bool refcount_add_not_zero(unsigned int i, refcount_t *r)
-{
- unsigned int old, new, val = atomic_read(&r->refs);
-
- for (;;) {
- if (!val)
- return false;
-
- if (unlikely(val == UINT_MAX))
- return true;
-
- new = val + i;
- if (new < val)
- new = UINT_MAX;
- old = atomic_cmpxchg_relaxed(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
-
- REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
- return true;
-}
-
-static inline void refcount_add(unsigned int i, refcount_t *r)
-{
- REFCOUNT_WARN(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
-}
-
-/*
- * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- */
-static inline __refcount_check
-bool refcount_inc_not_zero(refcount_t *r)
-{
- unsigned int old, new, val = atomic_read(&r->refs);
-
- for (;;) {
- new = val + 1;
-
- if (!val)
- return false;
-
- if (unlikely(!new))
- return true;
-
- old = atomic_cmpxchg_relaxed(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
-
- REFCOUNT_WARN(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
- return true;
-}
-
-/*
- * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object, will WARN when this is not so.
- */
-static inline void refcount_inc(refcount_t *r)
-{
- REFCOUNT_WARN(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
-}
-
-/*
- * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_sub_and_test(unsigned int i, refcount_t *r)
-{
- unsigned int old, new, val = atomic_read(&r->refs);
-
- for (;;) {
- if (unlikely(val == UINT_MAX))
- return false;
-
- new = val - i;
- if (new > val) {
- REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
- return false;
- }
-
- old = atomic_cmpxchg_release(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
-
- return !new;
-}
-
-static inline __refcount_check
-bool refcount_dec_and_test(refcount_t *r)
-{
- return refcount_sub_and_test(1, r);
-}
+extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
+extern void refcount_add(unsigned int i, refcount_t *r);
-/*
- * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
- * when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before.
- */
-static inline
-void refcount_dec(refcount_t *r)
-{
- REFCOUNT_WARN(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
-}
-
-/*
- * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
- * success thereof.
- *
- * Like all decrement operations, it provides release memory order and provides
- * a control dependency.
- *
- * It can be used like a try-delete operator; this explicit case is provided
- * and not cmpxchg in generic, because that would allow implementing unsafe
- * operations.
- */
-static inline __refcount_check
-bool refcount_dec_if_one(refcount_t *r)
-{
- return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
-}
-
-/*
- * No atomic_t counterpart, it decrements unless the value is 1, in which case
- * it will return false.
- *
- * Was often done like: atomic_add_unless(&var, -1, 1)
- */
-static inline __refcount_check
-bool refcount_dec_not_one(refcount_t *r)
-{
- unsigned int old, new, val = atomic_read(&r->refs);
+extern __must_check bool refcount_inc_not_zero(refcount_t *r);
+extern void refcount_inc(refcount_t *r);
- for (;;) {
- if (unlikely(val == UINT_MAX))
- return true;
+extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+extern void refcount_sub(unsigned int i, refcount_t *r);
- if (val == 1)
- return false;
+extern __must_check bool refcount_dec_and_test(refcount_t *r);
+extern void refcount_dec(refcount_t *r);
- new = val - 1;
- if (new > val) {
- REFCOUNT_WARN(new > val, "refcount_t: underflow; use-after-free.\n");
- return true;
- }
-
- old = atomic_cmpxchg_release(&r->refs, val, new);
- if (old == val)
- break;
-
- val = old;
- }
-
- return true;
-}
-
-/*
- * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
- * to decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
-{
- if (refcount_dec_not_one(r))
- return false;
-
- mutex_lock(lock);
- if (!refcount_dec_and_test(r)) {
- mutex_unlock(lock);
- return false;
- }
-
- return true;
-}
-
-/*
- * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
- */
-static inline __refcount_check
-bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
-{
- if (refcount_dec_not_one(r))
- return false;
-
- spin_lock(lock);
- if (!refcount_dec_and_test(r)) {
- spin_unlock(lock);
- return false;
- }
-
- return true;
-}
+extern __must_check bool refcount_dec_if_one(refcount_t *r);
+extern __must_check bool refcount_dec_not_one(refcount_t *r);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
#endif /* _LINUX_REFCOUNT_H */
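With the helpers now out of line, callers keep the familiar get/put pattern. A minimal sketch (not part of the patch); struct foo and the kfree()-based release are illustrative, and <linux/slab.h> is assumed to be included.

struct foo {
	refcount_t ref;
	/* ... payload ... */
};

static struct foo *foo_get(struct foo *f)
{
	refcount_inc(&f->ref);		/* WARNs if the count was already 0 */
	return f;
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->ref))
		kfree(f);		/* last reference dropped */
}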
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 15321fb1df6b..8c89e902df3e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
+#include <linux/highmem.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -196,41 +197,30 @@ int page_referenced(struct page *, int is_locked,
int try_to_unmap(struct page *, enum ttu_flags flags);
-/*
- * Used by uprobes to replace a userspace page safely
- */
-pte_t *__page_check_address(struct page *, struct mm_struct *,
- unsigned long, spinlock_t **, int);
-
-static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
- unsigned long address,
- spinlock_t **ptlp, int sync)
-{
- pte_t *ptep;
+/* Avoid racy checks */
+#define PVMW_SYNC (1 << 0)
+/* Look for migration entries rather than present PTEs */
+#define PVMW_MIGRATION (1 << 1)
- __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
- ptlp, sync));
- return ptep;
-}
+struct page_vma_mapped_walk {
+ struct page *page;
+ struct vm_area_struct *vma;
+ unsigned long address;
+ pmd_t *pmd;
+ pte_t *pte;
+ spinlock_t *ptl;
+ unsigned int flags;
+};
-/*
- * Used by idle page tracking to check if a page was referenced via page
- * tables.
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
- unsigned long address, pmd_t **pmdp,
- pte_t **ptep, spinlock_t **ptlp);
-#else
-static inline bool page_check_address_transhuge(struct page *page,
- struct mm_struct *mm, unsigned long address,
- pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
+static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
- *ptep = page_check_address(page, mm, address, ptlp, 0);
- *pmdp = NULL;
- return !!*ptep;
+ if (pvmw->pte)
+ pte_unmap(pvmw->pte);
+ if (pvmw->ptl)
+ spin_unlock(pvmw->ptl);
}
-#endif
+
+bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
/*
* Used by swapoff to help locate where page is expected in vma.
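A minimal sketch (not part of the patch) of the walker's calling convention; the function is illustrative, and page, vma and address are assumed to come from an rmap walk. Each successful iteration leaves pvmw.pte or pvmw.pmd describing a mapping with pvmw.ptl held, and the walker releases its own locks when it finally returns false.

static int example_count_mappings(struct page *page,
				  struct vm_area_struct *vma,
				  unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int found = 0;

	while (page_vma_mapped_walk(&pvmw))
		found++;	/* inspect pvmw.pte / pvmw.pmd here */

	return found;
}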
diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h
new file mode 100644
index 000000000000..ea05f6c51413
--- /dev/null
+++ b/include/linux/rodata_test.h
@@ -0,0 +1,23 @@
+/*
+ * rodata_test.h: functional test for mark_rodata_ro function
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef _RODATA_TEST_H
+#define _RODATA_TEST_H
+
+#ifdef CONFIG_DEBUG_RODATA_TEST
+extern const int rodata_test_data;
+void rodata_test(void);
+#else
+static inline void rodata_test(void) {}
+#endif
+
+#endif /* _RODATA_TEST_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 451e241f32c5..4a28deb5f210 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2904,6 +2904,28 @@ static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
*/
extern struct mm_struct * mm_alloc(void);
+/**
+ * mmgrab() - Pin a &struct mm_struct.
+ * @mm: The &struct mm_struct to pin.
+ *
+ * Make sure that @mm will not get freed even after the owning task
+ * exits. This doesn't guarantee that the associated address space
+ * will still exist later on and mmget_not_zero() has to be used before
+ * accessing it.
+ *
+ * This is a preferred way to pin @mm for a longer/unbounded amount
+ * of time.
+ *
+ * Use mmdrop() to release the reference acquired by mmgrab().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmgrab(struct mm_struct *mm)
+{
+ atomic_inc(&mm->mm_count);
+}
+
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
@@ -2926,6 +2948,27 @@ static inline void mmdrop_async(struct mm_struct *mm)
}
}
+/**
+ * mmget() - Pin the address space associated with a &struct mm_struct.
+ * @mm: The address space to pin.
+ *
+ * Make sure that the address space of the given &struct mm_struct doesn't
+ * go away. This does not protect against parts of the address space being
+ * modified or freed, however.
+ *
+ * Never use this function to pin this address space for an
+ * unbounded/indefinite amount of time.
+ *
+ * Use mmput() to release the reference acquired by mmget().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmget(struct mm_struct *mm)
+{
+ atomic_inc(&mm->mm_users);
+}
+
static inline bool mmget_not_zero(struct mm_struct *mm)
{
return atomic_inc_not_zero(&mm->mm_users);
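A minimal sketch (not part of the patch) contrasting the two pin helpers; the function is illustrative. mmgrab()/mmdrop() only keep the struct mm_struct allocation alive, while mmget_not_zero()/mmput() also keep the address space usable.

static void example_mm_pinning(struct mm_struct *mm)
{
	mmgrab(mm);			/* mm_count: struct stays allocated */
	/* ... long-lived bookkeeping that never touches VMAs ... */
	mmdrop(mm);

	if (mmget_not_zero(mm)) {	/* mm_users: address space stays usable */
		/* ... short, bounded access to the address space ... */
		mmput(mm);
	}
}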
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index deee23d012e7..04b124fca51e 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -27,6 +27,7 @@ typedef int (sec_send_recv)(void *data, u16 spsp, u8 secp, void *buffer,
size_t len, bool send);
#ifdef CONFIG_BLK_SED_OPAL
+void free_opal_dev(struct opal_dev *dev);
bool opal_unlock_from_suspend(struct opal_dev *dev);
struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv);
int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr);
@@ -51,6 +52,10 @@ static inline bool is_sed_ioctl(unsigned int cmd)
return false;
}
#else
+static inline void free_opal_dev(struct opal_dev *dev)
+{
+}
+
static inline bool is_sed_ioctl(unsigned int cmd)
{
return false;
diff --git a/include/linux/sem.h b/include/linux/sem.h
index d0efd6e6c20a..4fc222f8755d 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -21,7 +21,7 @@ struct sem_array {
struct list_head list_id; /* undo requests on this array */
int sem_nsems; /* no. of semaphores in array */
int complex_count; /* pending complex operations */
- bool complex_mode; /* no parallel simple ops */
+ unsigned int use_global_lock;/* >0: global lock required */
};
#ifdef CONFIG_SYSVIPC
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index fdaac9d4d46d..a7d6bd2a918f 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -57,7 +57,14 @@ extern int shmem_zero_setup(struct vm_area_struct *);
extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+#ifdef CONFIG_SHMEM
extern bool shmem_mapping(struct address_space *mapping);
+#else
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+ return false;
+}
+#endif /* CONFIG_SHMEM */
extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h
index 3f22932e67a4..f4199e758f97 100644
--- a/include/linux/spi/flash.h
+++ b/include/linux/spi/flash.h
@@ -7,7 +7,7 @@ struct mtd_partition;
* struct flash_platform_data: board-specific flash data
* @name: optional flash device name (eg, as used with mtdparts=)
* @parts: optional array of mtd_partitions for static partitioning
- * @nr_parts: number of mtd_partitions for static partitoning
+ * @nr_parts: number of mtd_partitions for static partitioning
* @type: optional flash device type (e.g. m25p80 vs m25p64), for use
* with chips that can't be queried for JEDEC or other IDs
*
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 8a511c0985aa..20d157a518a7 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -204,8 +204,11 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
kref_put(&h->ref, cd->cache_put);
}
-static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
+static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
+ if (!test_bit(CACHE_VALID, &h->flags))
+ return false;
+
return (h->expiry_time < seconds_since_boot()) ||
(detail->flush_time >= h->last_refresh);
}
@@ -227,6 +230,7 @@ extern void sunrpc_destroy_cache_detail(struct cache_detail *cd);
extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
umode_t, struct cache_detail *);
extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
+extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *);
/* Must store cache_detail in seq_file->private if using next three functions */
extern void *cache_seq_start(struct seq_file *file, loff_t *pos);
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index cfda6adcf33c..245fc59b7324 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -110,6 +110,15 @@ struct rpcrdma_msg {
};
/*
+ * XDR sizes, in quads
+ */
+enum {
+ rpcrdma_fixed_maxsz = 4,
+ rpcrdma_segment_maxsz = 4,
+ rpcrdma_readchunk_maxsz = 2 + rpcrdma_segment_maxsz,
+};
+
+/*
* Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
*/
#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 7321ae933867..e770abeed32d 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -400,10 +400,14 @@ struct svc_version {
struct svc_procedure * vs_proc; /* per-procedure info */
u32 vs_xdrsize; /* xdrsize needed for this version */
- unsigned int vs_hidden : 1, /* Don't register with portmapper.
- * Only used for nfsacl so far. */
- vs_rpcb_optnl:1;/* Don't care the result of register.
- * Only used for nfsv4. */
+ /* Don't register with rpcbind */
+ bool vs_hidden;
+
+ /* Don't care if the rpcbind registration fails */
+ bool vs_rpcb_optnl;
+
+ /* Need xprt with congestion control */
+ bool vs_need_cong_ctrl;
/* Override dispatch function (e.g. when caching replies).
* A return value of 0 means drop the request.
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 757fb963696c..b105f73e3ca2 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -70,7 +70,7 @@ extern atomic_t rdma_stat_sq_prod;
* completes.
*/
struct svc_rdma_op_ctxt {
- struct list_head free;
+ struct list_head list;
struct svc_rdma_op_ctxt *read_hdr;
struct svc_rdma_fastreg_mr *frmr;
int hdr_count;
@@ -78,7 +78,6 @@ struct svc_rdma_op_ctxt {
struct ib_cqe cqe;
struct ib_cqe reg_cqe;
struct ib_cqe inv_cqe;
- struct list_head dto_q;
u32 byte_len;
u32 position;
struct svcxprt_rdma *xprt;
@@ -141,7 +140,8 @@ struct svcxprt_rdma {
atomic_t sc_sq_avail; /* SQEs ready to be consumed */
unsigned int sc_sq_depth; /* Depth of SQ */
unsigned int sc_rq_depth; /* Depth of RQ */
- u32 sc_max_requests; /* Forward credits */
+ __be32 sc_fc_credits; /* Forward credits */
+ u32 sc_max_requests; /* Max requests */
u32 sc_max_bc_requests;/* Backward credits */
int sc_max_req_size; /* Size of each RQ WR buf */
@@ -171,7 +171,6 @@ struct svcxprt_rdma {
wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */
unsigned long sc_flags;
- struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */
struct list_head sc_read_complete_q;
struct work_struct sc_work;
};
@@ -214,11 +213,7 @@ extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
__be32, __be64, u32);
-extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *,
- struct rpcrdma_msg *,
- struct rpcrdma_msg *,
- enum rpcrdma_proc);
-extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *);
+extern unsigned int svc_rdma_xdr_get_reply_hdr_len(__be32 *rdma_resp);
/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 7440290f64ac..ddb7f94a9d06 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -67,6 +67,7 @@ struct svc_xprt {
#define XPT_CACHE_AUTH 11 /* cache auth info */
#define XPT_LOCAL 12 /* connection from loopback interface */
#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
+#define XPT_CONG_CTRL 14 /* has congestion control */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 5a209b84fd9e..c7bdf895179c 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -61,6 +61,8 @@ struct timer_list {
#define TIMER_ARRAYSHIFT 22
#define TIMER_ARRAYMASK 0xFFC00000
+#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
+
#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
.entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index f431861f22f1..0468548acebf 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -61,10 +61,18 @@ extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
unsigned long from, unsigned long to,
unsigned long len);
-extern void madvise_userfault_dontneed(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start,
- unsigned long end);
+extern void userfaultfd_remove(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start,
+ unsigned long end);
+
+extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *uf);
+extern void userfaultfd_unmap_complete(struct mm_struct *mm,
+ struct list_head *uf);
+
+extern void userfaultfd_exit(struct mm_struct *mm);
#else /* CONFIG_USERFAULTFD */
@@ -112,12 +120,29 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
{
}
-static inline void madvise_userfault_dontneed(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start,
- unsigned long end)
+static inline void userfaultfd_remove(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start,
+ unsigned long end)
{
}
+
+static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct list_head *uf)
+{
+ return 0;
+}
+
+static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
+ struct list_head *uf)
+{
+}
+
+static inline void userfaultfd_exit(struct mm_struct *mm)
+{
+}
+
#endif /* CONFIG_USERFAULTFD */
#endif /* _LINUX_USERFAULTFD_K_H */
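A rough sketch (not part of the patch) of the intended calling pattern for the new unmap hooks, as an munmap()-style path might use them; the function is illustrative and the actual detach/unmap work is elided.

static int example_munmap_path(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	LIST_HEAD(uf);		/* userfaultfd contexts to notify */
	int ret;

	ret = userfaultfd_unmap_prep(vma, start, end, &uf);
	if (ret)
		return ret;

	/* ... detach and unmap the range while holding mmap_sem ... */

	userfaultfd_unmap_complete(mm, &uf);	/* after mmap_sem is dropped */
	return 0;
}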
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 35a4d8185b51..a786e5e8973b 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -117,6 +117,7 @@ struct watchdog_device {
#define WDOG_NO_WAY_OUT 1 /* Is 'nowayout' feature set ? */
#define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */
#define WDOG_HW_RUNNING 3 /* True if HW watchdog running */
+#define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */
struct list_head deferred;
};
@@ -151,6 +152,12 @@ static inline void watchdog_stop_on_reboot(struct watchdog_device *wdd)
set_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
}
+/* Use the following function to stop the watchdog when unregistering it */
+static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd)
+{
+ set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status);
+}
+
/* Use the following function to check if a timeout value is invalid */
static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)
{
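A minimal sketch (not part of the patch) of a driver opting in to the new stop-on-unregister behaviour during probe; the function, timeout value, and surrounding setup are illustrative.

static int example_wdt_setup(struct watchdog_device *wdd)
{
	wdd->timeout = 30;			/* seconds */
	watchdog_stop_on_reboot(wdd);
	watchdog_stop_on_unregister(wdd);	/* stop the HW on unregister too */

	return watchdog_register_device(wdd);
}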
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a26cc437293c..bde063cefd04 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -106,9 +106,9 @@ struct work_struct {
#endif
};
-#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
+#define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL)
#define WORK_DATA_STATIC_INIT() \
- ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
+ ATOMIC_LONG_INIT((unsigned long)(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC))
struct delayed_work {
struct work_struct work;
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 5527d910ba3d..a3c0cbd7c888 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -46,7 +46,7 @@ enum writeback_sync_modes {
*/
enum wb_reason {
WB_REASON_BACKGROUND,
- WB_REASON_TRY_TO_FREE_PAGES,
+ WB_REASON_VMSCAN,
WB_REASON_SYNC,
WB_REASON_PERIODIC,
WB_REASON_LAPTOP_TIMER,