new file mode 100644
@@ -0,0 +1,44 @@
+#ifndef _LINUX_VRANGE_H
+#define _LINUX_VRANGE_H
+
+#include <linux/vrange_types.h>
+#include <linux/mm.h>
+
+#define vrange_entry(ptr) \
+ container_of(ptr, struct vrange, node.rb)
+
+#ifdef CONFIG_MMU
+
+/* Initialize @vroot: record its type, start with an empty tree, init lock. */
+static inline void vrange_root_init(struct vrange_root *vroot, int type)
+{
+ vroot->type = type;
+ vroot->v_rb = RB_ROOT;
+ mutex_init(&vroot->v_lock);
+}
+
+/* Take the lock protecting @vroot's interval tree. */
+static inline void vrange_lock(struct vrange_root *vroot)
+{
+ mutex_lock(&vroot->v_lock);
+}
+
+/* Release the lock protecting @vroot's interval tree. */
+static inline void vrange_unlock(struct vrange_root *vroot)
+{
+ mutex_unlock(&vroot->v_lock);
+}
+
+/* Return the owning root's type (VRANGE_MM or VRANGE_FILE). */
+static inline int vrange_type(struct vrange *vrange)
+{
+ return vrange->owner->type;
+}
+
+void vrange_init(void);
+extern void vrange_root_cleanup(struct vrange_root *vroot);
+
+#else
+
+/*
+ * !CONFIG_MMU: vrange is compiled out; these stubs do nothing.
+ * Note: no trailing ';' after an empty function body — a stray
+ * file-scope semicolon is invalid before C11 and warns with -Wpedantic.
+ */
+static inline void vrange_init(void) {}
+static inline void vrange_root_init(struct vrange_root *vroot, int type) {}
+static inline void vrange_root_cleanup(struct vrange_root *vroot) {}
+
+#endif
+#endif /* _LINUX_VRANGE_H */
new file mode 100644
@@ -0,0 +1,19 @@
+#ifndef _LINUX_VRANGE_TYPES_H
+#define _LINUX_VRANGE_TYPES_H
+
+#include <linux/mutex.h>
+#include <linux/interval_tree.h>
+
+/* Per-owner root of volatile ranges, one per mm or address_space. */
+struct vrange_root {
+ struct rb_root v_rb; /* interval tree of struct vrange */
+ struct mutex v_lock; /* Protect v_rb */
+ enum {VRANGE_MM, VRANGE_FILE} type; /* range root type */
+};
+
+/* One volatile range: an interval [node.start, node.last] in its owner. */
+struct vrange {
+ struct interval_tree_node node; /* links into owner->v_rb */
+ struct vrange_root *owner; /* root this range is inserted in; NULL if free */
+ int purged; /* non-zero if pages in this range were reclaimed */
+};
+#endif
+
@@ -74,6 +74,7 @@
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
+#include <linux/vrange.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -601,6 +602,7 @@ asmlinkage void __init start_kernel(void)
calibrate_delay();
pidmap_init();
anon_vma_init();
+ vrange_init();
#ifdef CONFIG_X86
if (efi_enabled(EFI_RUNTIME_SERVICES))
efi_enter_virtual_mode();
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o
+ earlycpio.o interval_tree.o
obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
@@ -5,7 +5,7 @@
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
- vmalloc.o pagewalk.o pgtable-generic.o
+ vmalloc.o pagewalk.o pgtable-generic.o vrange.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
mmu-$(CONFIG_MMU) += process_vm_access.o
new file mode 100644
@@ -0,0 +1,181 @@
+/*
+ * mm/vrange.c
+ */
+
+#include <linux/vrange.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *vrange_cachep;
+
+/* Create the slab cache for struct vrange; called once from start_kernel(). */
+void __init vrange_init(void)
+{
+ vrange_cachep = KMEM_CACHE(vrange, SLAB_PANIC);
+}
+
+/* Allocate a vrange from the slab cache; owner starts out unset. */
+static struct vrange *__vrange_alloc(gfp_t flags)
+{
+ struct vrange *vrange = kmem_cache_alloc(vrange_cachep, flags);
+
+ if (vrange)
+ vrange->owner = NULL;
+ return vrange;
+}
+
+/* Free a detached vrange; it must not still be linked into a root. */
+static void __vrange_free(struct vrange *range)
+{
+ WARN_ON(range->owner);
+ kmem_cache_free(vrange_cachep, range);
+}
+
+/* Insert @range into @vroot's tree; caller holds vrange_lock(vroot). */
+static void __vrange_add(struct vrange *range, struct vrange_root *vroot)
+{
+ range->owner = vroot;
+ interval_tree_insert(&range->node, &vroot->v_rb);
+}
+
+/* Unlink @range from its owner's tree; caller holds the owner's lock. */
+static void __vrange_remove(struct vrange *range)
+{
+ interval_tree_remove(&range->node, &range->owner->v_rb);
+ range->owner = NULL;
+}
+
+/*
+ * Fill in @range's interval and purged state. Only valid on a range that
+ * is not currently in a tree (the interval keys must not change in place).
+ */
+static inline void __vrange_set(struct vrange *range,
+ unsigned long start_idx, unsigned long end_idx,
+ bool purged)
+{
+ range->node.start = start_idx;
+ range->node.last = end_idx;
+ range->purged = purged;
+}
+
+/*
+ * Change @range's interval to [start_idx, end_idx], preserving its purged
+ * flag. Implemented as remove/set/re-insert because interval tree keys
+ * cannot be modified while the node is in the tree.
+ */
+static inline void __vrange_resize(struct vrange *range,
+ unsigned long start_idx, unsigned long end_idx)
+{
+ struct vrange_root *vroot = range->owner;
+ bool purged = range->purged;
+
+ __vrange_remove(range);
+ __vrange_set(range, start_idx, end_idx, purged);
+ __vrange_add(range, vroot);
+}
+
+/*
+ * Mark [start_idx, end_idx] volatile in @vroot. Any existing ranges that
+ * overlap are merged into one covering range; the merged range's purged
+ * flag is the OR of all absorbed ranges. Returns 0 on success, -ENOMEM
+ * if the replacement range cannot be allocated.
+ */
+static int vrange_add(struct vrange_root *vroot,
+ unsigned long start_idx, unsigned long end_idx)
+{
+ struct vrange *new_range, *range;
+ struct interval_tree_node *node, *next;
+ int purged = 0;
+
+ /* Allocate before taking the lock; GFP_KERNEL may sleep. */
+ new_range = __vrange_alloc(GFP_KERNEL);
+ if (!new_range)
+ return -ENOMEM;
+
+ vrange_lock(vroot);
+
+ node = interval_tree_iter_first(&vroot->v_rb, start_idx, end_idx);
+ while (node) {
+ /* Fetch the successor first; this node may be removed below. */
+ next = interval_tree_iter_next(node, start_idx, end_idx);
+ range = container_of(node, struct vrange, node);
+ /* old range covers new range fully */
+ if (node->start <= start_idx && node->last >= end_idx) {
+ __vrange_free(new_range);
+ goto out;
+ }
+
+ /* Grow the requested interval to absorb this overlap. */
+ start_idx = min_t(unsigned long, start_idx, node->start);
+ end_idx = max_t(unsigned long, end_idx, node->last);
+ purged |= range->purged;
+
+ __vrange_remove(range);
+ __vrange_free(range);
+
+ node = next;
+ }
+
+ /* Insert the single merged range in place of everything absorbed. */
+ __vrange_set(new_range, start_idx, end_idx, purged);
+ __vrange_add(new_range, vroot);
+out:
+ vrange_unlock(vroot);
+ return 0;
+}
+
+/*
+ * Clear the volatile mark on [start_idx, end_idx] in @vroot, trimming or
+ * splitting any overlapping ranges. *purged is set non-zero if any
+ * affected range had been purged. Returns 0 on success, -EINVAL if
+ * @purged is NULL, -ENOMEM if the split-range allocation fails.
+ */
+static int vrange_remove(struct vrange_root *vroot,
+ unsigned long start_idx, unsigned long end_idx,
+ int *purged)
+{
+ struct vrange *new_range, *range;
+ struct interval_tree_node *node, *next;
+ bool used_new = false;
+
+ if (!purged)
+ return -EINVAL;
+
+ *purged = 0;
+
+ /* Pre-allocate in case an existing range must be split in two. */
+ new_range = __vrange_alloc(GFP_KERNEL);
+ if (!new_range)
+ return -ENOMEM;
+
+ vrange_lock(vroot);
+
+ node = interval_tree_iter_first(&vroot->v_rb, start_idx, end_idx);
+ while (node) {
+ /* Fetch the successor first; this node may be removed below. */
+ next = interval_tree_iter_next(node, start_idx, end_idx);
+ range = container_of(node, struct vrange, node);
+
+ *purged |= range->purged;
+
+ if (start_idx <= node->start && end_idx >= node->last) {
+ /* Requested range covers this range fully */
+ __vrange_remove(range);
+ __vrange_free(range);
+ } else if (node->start >= start_idx) {
+ /*
+ * Requested range covers the left part of this
+ * range
+ */
+ __vrange_resize(range, end_idx + 1, node->last);
+ } else if (node->last <= end_idx) {
+ /*
+ * Requested range covers the right part of this
+ * range
+ */
+ __vrange_resize(range, node->start, start_idx - 1);
+ } else {
+ /*
+ * Requested range is in the middle of this range:
+ * split it. Save node->last before the resize
+ * below overwrites it (node is &range->node, so
+ * reading node->last after the resize would hand
+ * the new right-hand range an inverted interval).
+ */
+ unsigned long last = node->last;
+
+ used_new = true;
+ __vrange_resize(range, node->start, start_idx - 1);
+ __vrange_set(new_range, end_idx + 1, last,
+ range->purged);
+ __vrange_add(new_range, vroot);
+ break;
+ }
+
+ node = next;
+ }
+ vrange_unlock(vroot);
+
+ if (!used_new)
+ __vrange_free(new_range);
+
+ return 0;
+}
+
+/* Detach and free every vrange still attached to @vroot. */
+void vrange_root_cleanup(struct vrange_root *vroot)
+{
+ struct rb_node *node;
+
+ vrange_lock(vroot);
+ /* Each removal shrinks the tree, so keep taking the leftmost node. */
+ while ((node = rb_first(&vroot->v_rb)) != NULL) {
+ struct vrange *range = vrange_entry(node);
+
+ __vrange_remove(range);
+ __vrange_free(range);
+ }
+ vrange_unlock(vroot);
+}
+