@@ -76,14 +76,14 @@ extern unsigned long pgd_current[];
  * All unused by hardware upper bits will be considered
  * as a software asid extension.
  */
-static unsigned long asid_version_mask(unsigned int cpu)
+static inline u64 asid_version_mask(unsigned int cpu)
 {
 	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
 
-	return ~(asid_mask | (asid_mask - 1));
+	return ~(u64)(asid_mask | (asid_mask - 1));
 }
 
-static unsigned long asid_first_version(unsigned int cpu)
+static inline u64 asid_first_version(unsigned int cpu)
 {
 	return ~asid_version_mask(cpu) + 1;
 }
@@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
-	unsigned long asid = asid_cache(cpu);
+	u64 asid = asid_cache(cpu);
 
 	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 		local_flush_tlb_all();	/* start new asid cycle */
-		if (!asid)		/* fix version if needed */
-			asid = asid_first_version(cpu);
 	}
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
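
For reference, a minimal standalone sketch (not part of the patch) of the mask arithmetic above, assuming an 8-bit hardware ASID field, i.e. cpu_asid_mask() returning 0xff as on many MIPS32 cores; the ASID_MASK macro and helper names here are hypothetical stand-ins for the kernel ones. With a 64-bit counter the version field occupies every bit above the hardware ASID bits, so at one increment per exhaustion of the ASID space it cannot realistically wrap, which is why the "fix version if needed" test can be dropped.

/*
 * Standalone illustration, assuming an 8-bit hardware ASID (mask 0xff).
 * Mirrors asid_version_mask()/asid_first_version() from the hunk above.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define ASID_MASK 0xffULL		/* assumed hardware ASID mask */

static inline uint64_t version_mask(void)
{
	/* all counter bits above the hardware ASID bits */
	return ~(uint64_t)(ASID_MASK | (ASID_MASK - 1));
}

static inline uint64_t first_version(void)
{
	/* smallest counter value whose version field is non-zero */
	return ~version_mask() + 1;
}

int main(void)
{
	printf("version mask  : %#" PRIx64 "\n", version_mask());	/* 0xffffffffffffff00 */
	printf("first version : %#" PRIx64 "\n", first_version());	/* 0x100 */
	return 0;
}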