@@ -57,11 +57,17 @@ struct ldt_struct {
 /*
  * Used for LDT copy/destruction.
  */
-int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
+static inline void init_new_context_ldt(struct mm_struct *mm)
+{
+	mm->context.ldt = NULL;
+	init_rwsem(&mm->context.ldt_usr_sem);
+}
+int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
 void destroy_context_ldt(struct mm_struct *mm);
 #else	/* CONFIG_MODIFY_LDT_SYSCALL */
-static inline int init_new_context_ldt(struct task_struct *tsk,
-					struct mm_struct *mm)
+static inline void init_new_context_ldt(struct mm_struct *mm) { }
+static inline int ldt_dup_context(struct mm_struct *oldmm,
+				  struct mm_struct *mm)
 {
 	return 0;
 }
@@ -137,15 +143,16 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
 	atomic64_set(&mm->context.tlb_gen, 0);
 
-	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
 		/* pkey 0 is the default and always allocated */
 		mm->context.pkey_allocation_map = 0x1;
 		/* -1 means unallocated or invalid */
 		mm->context.execute_only_pkey = -1;
 	}
-	#endif
-	return init_new_context_ldt(tsk, mm);
+#endif
+	init_new_context_ldt(mm);
+	return 0;
 }
 
 static inline void destroy_context(struct mm_struct *mm)
 {
@@ -181,7 +188,7 @@ do { \
 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
 	paravirt_arch_dup_mmap(oldmm, mm);
-	return 0;
+	return ldt_dup_context(oldmm, mm);
 }
 
 static inline void arch_exit_mmap(struct mm_struct *mm)
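
For context, the net effect of the hunks above is that the LDT is now copied only on the fork() path (arch_dup_mmap() -> ldt_dup_context()), while every freshly initialized mm, including the one built for execve(), starts out with context.ldt == NULL via init_new_context_ldt(). The user-space sketch below is only an illustration of that behavior, not part of the patch; the program and its read_ldt() helper are hypothetical, but modify_ldt(), fork() and execve() are the real interfaces involved.

/*
 * Illustrative only: install one LDT entry, then fork. With the change
 * above, the child's mm gets a copy of the LDT via ldt_dup_context(),
 * while the mm created for an execve()'d program starts with no LDT.
 */
#define _GNU_SOURCE
#include <asm/ldt.h>		/* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

static long read_ldt(void *buf, unsigned long len)
{
	/* modify_ldt(0, ...) copies the current LDT out; returns bytes read */
	return syscall(SYS_modify_ldt, 0, buf, len);
}

int main(void)
{
	struct user_desc d;
	char buf[64 * 1024];

	memset(&d, 0, sizeof(d));
	d.entry_number = 0;
	d.base_addr = 0;
	d.limit = 0xfffff;
	d.seg_32bit = 1;
	d.limit_in_pages = 1;

	/* modify_ldt(1, ...) installs one LDT entry in this mm */
	if (syscall(SYS_modify_ldt, 1, &d, sizeof(d)) != 0) {
		perror("modify_ldt");
		return 1;
	}

	if (fork() == 0) {
		/* Child: the LDT was duplicated on the fork path */
		printf("child sees %ld LDT bytes\n", read_ldt(buf, sizeof(buf)));
		/* The execve()'d program gets a fresh mm with an empty LDT */
		execlp("true", "true", (char *)NULL);
		_exit(1);
	}
	wait(NULL);
	printf("parent sees %ld LDT bytes\n", read_ldt(buf, sizeof(buf)));
	return 0;
}

With the patch applied, the child should still report the parent's LDT bytes (copied in arch_dup_mmap()), whereas nothing survives into the exec'd image, since its mm goes through init_new_context_ldt() and no LDT copy happens on that path.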