|
@@ -3614,6 +3614,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
|
|
|
EXPORT_SYMBOL(__kmalloc_node);
|
|
|
#endif
|
|
|
|
|
|
+#ifdef CONFIG_HARDENED_USERCOPY
|


|


|


+/*
|


|


|


+ * Rejects usercopy spans that are incorrectly sized: the range [ptr, ptr+n)
|


|


|


+ * must fall entirely within a single object of the page's slab cache,
|


|


|


+ * and outside any SLUB red zone. Returns NULL if the check passes,
|


|


|


+ * otherwise const char * to the name of the cache to indicate an error.
|


|


|


+ */
|


|


|


+const char *__check_heap_object(const void *ptr, unsigned long n,
|


|


|


+ struct page *page)
|


|


|


+{
|


|


|


+ struct kmem_cache *s;
|


|


|


+ unsigned long offset;
|


|


|


+ size_t object_size;
|


|


|


+
|


|


|


+ /* Find the owning cache and its usable (non-metadata) object size. */
|


|


|


+ s = page->slab_cache;
|


|


|


+ object_size = slab_ksize(s);
|


|


|


+
|


|


|


+ /* Reject pointers that fall below the start of the slab page. */
|


|


|


+ if (ptr < page_address(page))
|


|


|


+ return s->name;
|


|


|


+
|


|


|


+ /* Offset of ptr within its object; objects are s->size bytes apart. */
|


|


|


+ offset = (ptr - page_address(page)) % s->size;
|


|


|


+
|


|


|


+ /* Adjust for the left red zone; reject pointers landing inside it. */
|


|


|


+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
|


|


|


+ if (offset < s->red_left_pad)
|


|


|


+ return s->name;
|


|


|


+ offset -= s->red_left_pad;
|


|


|


+ }
|


|


|


+
|


|


|


+ /* Allow address range falling entirely within object size. */
|


|


|


+ if (offset <= object_size && n <= object_size - offset)
|


|


|


+ return NULL;
|


|


|


+
|


|


|


+ return s->name;
|


|


|


+}
|


|


|


+#endif /* CONFIG_HARDENED_USERCOPY */
|


|


|


+
|
|
|
static size_t __ksize(const void *object)
|
|
|
{
|
|
|
struct page *page;
|