@@ -55,27 +55,27 @@
 #ifndef __ASSEMBLY__
 
 enum VM_CACHE_OPS {
-	ickill,
-	dckill,
-	l2kill,
-	dccleaninva,
-	icinva,
-	idsync,
-	fetch_cfg
+	hvmc_ickill,
+	hvmc_dckill,
+	hvmc_l2kill,
+	hvmc_dccleaninva,
+	hvmc_icinva,
+	hvmc_idsync,
+	hvmc_fetch_cfg
 };
 
 enum VM_INT_OPS {
-	nop,
-	globen,
-	globdis,
-	locen,
-	locdis,
-	affinity,
-	get,
-	peek,
-	status,
-	post,
-	clear
+	hvmi_nop,
+	hvmi_globen,
+	hvmi_globdis,
+	hvmi_locen,
+	hvmi_locdis,
+	hvmi_affinity,
+	hvmi_get,
+	hvmi_peek,
+	hvmi_status,
+	hvmi_post,
+	hvmi_clear
 };
 
 extern void _K_VM_event_vector(void);
@@ -98,95 +98,95 @@ long __vmvpid(void);
 
 static inline long __vmcache_ickill(void)
 {
-	return __vmcache(ickill, 0, 0);
+	return __vmcache(hvmc_ickill, 0, 0);
 }
 
 static inline long __vmcache_dckill(void)
 {
-	return __vmcache(dckill, 0, 0);
+	return __vmcache(hvmc_dckill, 0, 0);
 }
 
 static inline long __vmcache_l2kill(void)
 {
-	return __vmcache(l2kill, 0, 0);
+	return __vmcache(hvmc_l2kill, 0, 0);
 }
 
 static inline long __vmcache_dccleaninva(unsigned long addr, unsigned long len)
 {
-	return __vmcache(dccleaninva, addr, len);
+	return __vmcache(hvmc_dccleaninva, addr, len);
 }
 
 static inline long __vmcache_icinva(unsigned long addr, unsigned long len)
 {
-	return __vmcache(icinva, addr, len);
+	return __vmcache(hvmc_icinva, addr, len);
 }
 
 static inline long __vmcache_idsync(unsigned long addr,
 			unsigned long len)
 {
-	return __vmcache(idsync, addr, len);
+	return __vmcache(hvmc_idsync, addr, len);
 }
 
 static inline long __vmcache_fetch_cfg(unsigned long val)
 {
-	return __vmcache(fetch_cfg, val, 0);
+	return __vmcache(hvmc_fetch_cfg, val, 0);
 }
 
 /* interrupt operations */
 
 static inline long __vmintop_nop(void)
 {
-	return __vmintop(nop, 0, 0, 0, 0);
+	return __vmintop(hvmi_nop, 0, 0, 0, 0);
 }
 
 static inline long __vmintop_globen(long i)
 {
-	return __vmintop(globen, i, 0, 0, 0);
+	return __vmintop(hvmi_globen, i, 0, 0, 0);
 }
 
 static inline long __vmintop_globdis(long i)
 {
-	return __vmintop(globdis, i, 0, 0, 0);
+	return __vmintop(hvmi_globdis, i, 0, 0, 0);
 }
 
 static inline long __vmintop_locen(long i)
 {
-	return __vmintop(locen, i, 0, 0, 0);
+	return __vmintop(hvmi_locen, i, 0, 0, 0);
 }
 
 static inline long __vmintop_locdis(long i)
 {
-	return __vmintop(locdis, i, 0, 0, 0);
+	return __vmintop(hvmi_locdis, i, 0, 0, 0);
 }
 
 static inline long __vmintop_affinity(long i, long cpu)
 {
-	return __vmintop(locdis, i, cpu, 0, 0);
+	return __vmintop(hvmi_affinity, i, cpu, 0, 0);
 }
 
 static inline long __vmintop_get(void)
 {
-	return __vmintop(get, 0, 0, 0, 0);
+	return __vmintop(hvmi_get, 0, 0, 0, 0);
 }
 
 static inline long __vmintop_peek(void)
 {
-	return __vmintop(peek, 0, 0, 0, 0);
+	return __vmintop(hvmi_peek, 0, 0, 0, 0);
 }
 
 static inline long __vmintop_status(long i)
 {
-	return __vmintop(status, i, 0, 0, 0);
+	return __vmintop(hvmi_status, i, 0, 0, 0);
 }
 
 static inline long __vmintop_post(long i)
 {
-	return __vmintop(post, i, 0, 0, 0);
+	return __vmintop(hvmi_post, i, 0, 0, 0);
 }
 
 static inline long __vmintop_clear(long i)
 {
-	return __vmintop(clear, i, 0, 0, 0);
+	return __vmintop(hvmi_clear, i, 0, 0, 0);
 }
 
 #else /* Only assembly code should reference these */