@@ -458,6 +458,21 @@ static void set_max_threads(unsigned int max_threads_suggested)
 int arch_task_struct_size __read_mostly;
 #endif
 
+static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
+{
+	/* Fetch thread_struct whitelist for the architecture. */
+	arch_thread_struct_whitelist(offset, size);
+
+	/*
+	 * Handle zero-sized whitelist or empty thread_struct, otherwise
+	 * adjust offset to position of thread_struct in task_struct.
+	 */
+	if (unlikely(*size == 0))
+		*offset = 0;
+	else
+		*offset += offsetof(struct task_struct, thread);
+}
+
 void __init fork_init(void)
 {
 	int i;
@@ -466,11 +481,14 @@ void __init fork_init(void)
 #define ARCH_MIN_TASKALIGN 0
 #endif
 	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
+	unsigned long useroffset, usersize;
 
 	/* create a slab on which task_structs can be allocated */
-	task_struct_cachep = kmem_cache_create("task_struct",
+	task_struct_whitelist(&useroffset, &usersize);
+	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
 			arch_task_struct_size, align,
-			SLAB_PANIC|SLAB_ACCOUNT, NULL);
+			SLAB_PANIC|SLAB_ACCOUNT,
+			useroffset, usersize, NULL);
 #endif
 
 	/* do the arch specific task caches init */
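
For reference, arch_thread_struct_whitelist() is the per-architecture hook that the new task_struct_whitelist() helper calls. Below is a minimal sketch of what an architecture might provide, assuming a hypothetical thread_struct whose only region legitimately copied to or from user space is an FPU save buffer; the fpu_regs field name and the use of the sizeof_field() helper are illustrative assumptions, not part of this patch.

/* Hypothetical per-arch hook; fpu_regs is an invented field name. */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Whitelist only the FPU register save area in thread_struct. */
	*offset = offsetof(struct thread_struct, fpu_regs);
	*size = sizeof_field(struct thread_struct, fpu_regs);
}

task_struct_whitelist() then adds offsetof(struct task_struct, thread), so the resulting (offset, size) pair is expressed relative to the start of the task_struct slab object, which is what the slab allocator expects.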
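
The switch from kmem_cache_create() to kmem_cache_create_usercopy() hands that (useroffset, usersize) pair to the slab allocator, which records it as the only byte range of each task_struct that hardened usercopy (CONFIG_HARDENED_USERCOPY) will permit copy_to_user()/copy_from_user() to touch; a usersize of 0 whitelists nothing. As introduced in include/linux/slab.h, its declaration is roughly:

struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			size_t size, size_t align, slab_flags_t flags,
			size_t useroffset, size_t usersize,
			void (*ctor)(void *));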