ia64/xen: Remove Xen support for ia64
ia64 has not been supported by Xen since 4.2, so it's time to drop Xen/ia64 from Linux as well.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
committed by Tony Luck
parent 374b105797
commit d52eefb47d
@@ -111,8 +111,6 @@ static inline const char *acpi_get_sysname (void)
return "uv";
# elif defined (CONFIG_IA64_DIG)
return "dig";
# elif defined (CONFIG_IA64_XEN_GUEST)
return "xen";
# elif defined(CONFIG_IA64_DIG_VTD)
return "dig_vtd";
# else
@@ -113,8 +113,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
# include <asm/machvec_uv.h>
# elif defined (CONFIG_IA64_XEN_GUEST)
# include <asm/machvec_xen.h>
# elif defined (CONFIG_IA64_GENERIC)

# ifdef MACHVEC_PLATFORM_HEADER
@@ -1,22 +0,0 @@
#ifndef _ASM_IA64_MACHVEC_XEN_h
#define _ASM_IA64_MACHVEC_XEN_h

extern ia64_mv_setup_t dig_setup;
extern ia64_mv_cpu_init_t xen_cpu_init;
extern ia64_mv_irq_init_t xen_irq_init;
extern ia64_mv_send_ipi_t xen_platform_send_ipi;

/*
* This stuff has dual use!
*
* For a generic kernel, the macros are used to initialize the
* platform's machvec structure. When compiling a non-generic kernel,
* the macros are used directly.
*/
#define ia64_platform_name "xen"
#define platform_setup dig_setup
#define platform_cpu_init xen_cpu_init
#define platform_irq_init xen_irq_init
#define platform_send_ipi xen_platform_send_ipi

#endif /* _ASM_IA64_MACHVEC_XEN_h */
@@ -18,7 +18,6 @@
* - crash dumping code reserved region
* - Kernel memory map built from EFI memory map
* - ELF core header
* - xen start info if CONFIG_XEN
*
* More could be added if necessary
*/
@@ -75,7 +75,6 @@ void *paravirt_get_gate_section(void);
#ifdef CONFIG_PARAVIRT_GUEST

#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
#define PARAVIRT_HYPERVISOR_TYPE_XEN 1

#ifndef __ASSEMBLY__
@@ -11,7 +11,7 @@
/*
* These structs MUST NOT be changed.
* They are the ABI between hypervisor and guest OS.
* Both Xen and KVM are using this.
* KVM is using this.
*
* pvclock_vcpu_time_info holds the system time and the tsc timestamp
* of the last update. So the guest can use the tsc delta to get a
@@ -1,51 +0,0 @@
#ifndef _ASM_IA64_SYNC_BITOPS_H
#define _ASM_IA64_SYNC_BITOPS_H

/*
* Copyright (C) 2008 Isaku Yamahata <yamahata at valinux co jp>
*
* Based on synch_bitops.h which Dan Magenhaimer wrote.
*
* bit operations which provide guaranteed strong synchronisation
* when communicating with Xen or other guest OSes running on other CPUs.
*/

static inline void sync_set_bit(int nr, volatile void *addr)
{
set_bit(nr, addr);
}

static inline void sync_clear_bit(int nr, volatile void *addr)
{
clear_bit(nr, addr);
}

static inline void sync_change_bit(int nr, volatile void *addr)
{
change_bit(nr, addr);
}

static inline int sync_test_and_set_bit(int nr, volatile void *addr)
{
return test_and_set_bit(nr, addr);
}

static inline int sync_test_and_clear_bit(int nr, volatile void *addr)
{
return test_and_clear_bit(nr, addr);
}

static inline int sync_test_and_change_bit(int nr, volatile void *addr)
{
return test_and_change_bit(nr, addr);
}

static inline int sync_test_bit(int nr, const volatile void *addr)
{
return test_bit(nr, addr);
}

#define sync_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))cmpxchg_acq((ptr), (old), (new)))

#endif /* _ASM_IA64_SYNC_BITOPS_H */
@@ -1,41 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/events.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef _ASM_IA64_XEN_EVENTS_H
#define _ASM_IA64_XEN_EVENTS_H

enum ipi_vector {
XEN_RESCHEDULE_VECTOR,
XEN_IPI_VECTOR,
XEN_CMCP_VECTOR,
XEN_CPEP_VECTOR,

XEN_NR_IPIS,
};

static inline int xen_irqs_disabled(struct pt_regs *regs)
{
return !(ia64_psr(regs)->i);
}

#define irq_ctx_init(cpu) do { } while (0)

#endif /* _ASM_IA64_XEN_EVENTS_H */
@@ -1,265 +0,0 @@
/******************************************************************************
* hypercall.h
*
* Linux-specific hypervisor handling.
*
* Copyright (c) 2002-2004, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#ifndef _ASM_IA64_XEN_HYPERCALL_H
#define _ASM_IA64_XEN_HYPERCALL_H

#include <xen/interface/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <asm/xen/xcom_hcall.h>
struct xencomm_handle;
extern unsigned long __hypercall(unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4,
unsigned long a5, unsigned long cmd);

/*
* Assembler stubs for hyper-calls.
*/

#define _hypercall0(type, name) \
({ \
long __res; \
__res = __hypercall(0, 0, 0, 0, 0, __HYPERVISOR_##name);\
(type)__res; \
})

#define _hypercall1(type, name, a1) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
0, 0, 0, 0, __HYPERVISOR_##name); \
(type)__res; \
})

#define _hypercall2(type, name, a1, a2) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
(unsigned long)a2, \
0, 0, 0, __HYPERVISOR_##name); \
(type)__res; \
})

#define _hypercall3(type, name, a1, a2, a3) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
(unsigned long)a2, \
(unsigned long)a3, \
0, 0, __HYPERVISOR_##name); \
(type)__res; \
})

#define _hypercall4(type, name, a1, a2, a3, a4) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
(unsigned long)a2, \
(unsigned long)a3, \
(unsigned long)a4, \
0, __HYPERVISOR_##name); \
(type)__res; \
})

#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
({ \
long __res; \
__res = __hypercall((unsigned long)a1, \
(unsigned long)a2, \
(unsigned long)a3, \
(unsigned long)a4, \
(unsigned long)a5, \
__HYPERVISOR_##name); \
(type)__res; \
})


static inline int
xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, sched_op, cmd, arg);
}

static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
unsigned long timeout_hi = (unsigned long)(timeout >> 32);
unsigned long timeout_lo = (unsigned long)timeout;
return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}

static inline int
xencomm_arch_hypercall_multicall(struct xencomm_handle *call_list,
int nr_calls)
{
return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
xencomm_arch_hypercall_memory_op(unsigned int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, memory_op, cmd, arg);
}

static inline int
xencomm_arch_hypercall_event_channel_op(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, event_channel_op, cmd, arg);
}

static inline int
xencomm_arch_hypercall_xen_version(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
xencomm_arch_hypercall_console_io(int cmd, int count,
struct xencomm_handle *str)
{
return _hypercall3(int, console_io, cmd, count, str);
}

static inline int
xencomm_arch_hypercall_physdev_op(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, physdev_op, cmd, arg);
}

static inline int
xencomm_arch_hypercall_grant_table_op(unsigned int cmd,
struct xencomm_handle *uop,
unsigned int count)
{
return _hypercall3(int, grant_table_op, cmd, uop, count);
}

int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);

extern int xencomm_arch_hypercall_suspend(struct xencomm_handle *arg);

static inline int
xencomm_arch_hypercall_callback_op(int cmd, struct xencomm_handle *arg)
{
return _hypercall2(int, callback_op, cmd, arg);
}

static inline long
xencomm_arch_hypercall_vcpu_op(int cmd, int cpu, void *arg)
{
return _hypercall3(long, vcpu_op, cmd, cpu, arg);
}

static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
switch (cmd) {
case PHYSDEVOP_eoi:
return _hypercall1(int, ia64_fast_eoi,
((struct physdev_eoi *)arg)->irq);
default:
return xencomm_hypercall_physdev_op(cmd, arg);
}
}

static inline long
xencomm_arch_hypercall_opt_feature(struct xencomm_handle *arg)
{
return _hypercall1(long, opt_feature, arg);
}

/* for balloon driver */
#define HYPERVISOR_update_va_mapping(va, new_val, flags) (0)

/* Use xencomm to do hypercalls. */
#define HYPERVISOR_sched_op xencomm_hypercall_sched_op
#define HYPERVISOR_event_channel_op xencomm_hypercall_event_channel_op
#define HYPERVISOR_callback_op xencomm_hypercall_callback_op
#define HYPERVISOR_multicall xencomm_hypercall_multicall
#define HYPERVISOR_xen_version xencomm_hypercall_xen_version
#define HYPERVISOR_console_io xencomm_hypercall_console_io
#define HYPERVISOR_memory_op xencomm_hypercall_memory_op
#define HYPERVISOR_suspend xencomm_hypercall_suspend
#define HYPERVISOR_vcpu_op xencomm_hypercall_vcpu_op
#define HYPERVISOR_opt_feature xencomm_hypercall_opt_feature

/* to compile gnttab_copy_grant_page() in drivers/xen/core/gnttab.c */
#define HYPERVISOR_mmu_update(req, count, success_count, domid) ({ BUG(); 0; })

static inline int
HYPERVISOR_shutdown(
unsigned int reason)
{
struct sched_shutdown sched_shutdown = {
.reason = reason
};

int rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown);

return rc;
}

/* for netfront.c, netback.c */
#define MULTI_UVMFLAGS_INDEX 0 /* XXX any value */

static inline void
MULTI_update_va_mapping(
struct multicall_entry *mcl, unsigned long va,
pte_t new_val, unsigned long flags)
{
mcl->op = __HYPERVISOR_update_va_mapping;
mcl->result = 0;
}

static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
void *uop, unsigned int count)
{
mcl->op = __HYPERVISOR_grant_table_op;
mcl->args[0] = cmd;
mcl->args[1] = (unsigned long)uop;
mcl->args[2] = count;
}

static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
int count, int *success_count, domid_t domid)
{
mcl->op = __HYPERVISOR_mmu_update;
mcl->args[0] = (unsigned long)req;
mcl->args[1] = count;
mcl->args[2] = (unsigned long)success_count;
mcl->args[3] = domid;
}

#endif /* _ASM_IA64_XEN_HYPERCALL_H */
@@ -1,61 +0,0 @@
/******************************************************************************
* hypervisor.h
*
* Linux-specific hypervisor handling.
*
* Copyright (c) 2002-2004, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/

#ifndef _ASM_IA64_XEN_HYPERVISOR_H
#define _ASM_IA64_XEN_HYPERVISOR_H

#include <linux/err.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h> /* to compile feature.c */
#include <xen/features.h> /* to comiple xen-netfront.c */
#include <xen/xen.h>
#include <asm/xen/hypercall.h>

#ifdef CONFIG_XEN
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;

void __init xen_setup_vcpu_info_placement(void);
void force_evtchn_callback(void);

/* for drivers/xen/balloon/balloon.c */
#ifdef CONFIG_XEN_SCRUB_PAGES
#define scrub_pages(_p, _n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
#else
#define scrub_pages(_p, _n) ((void)0)
#endif

/* For setup_arch() in arch/ia64/kernel/setup.c */
void xen_ia64_enable_opt_feature(void);
#endif

#endif /* _ASM_IA64_XEN_HYPERVISOR_H */
@@ -1,486 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/inst.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/

#include <asm/xen/privop.h>

#define ia64_ivt xen_ivt
#define DO_SAVE_MIN XEN_DO_SAVE_MIN

#define __paravirt_switch_to xen_switch_to
#define __paravirt_leave_syscall xen_leave_syscall
#define __paravirt_work_processed_syscall xen_work_processed_syscall
#define __paravirt_leave_kernel xen_leave_kernel
#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
#define __paravirt_work_processed_syscall_target \
xen_work_processed_syscall

#define paravirt_fsyscall_table xen_fsyscall_table
#define paravirt_fsys_bubble_down xen_fsys_bubble_down

#define MOV_FROM_IFA(reg) \
movl reg = XSI_IFA; \
;; \
ld8 reg = [reg]

#define MOV_FROM_ITIR(reg) \
movl reg = XSI_ITIR; \
;; \
ld8 reg = [reg]

#define MOV_FROM_ISR(reg) \
movl reg = XSI_ISR; \
;; \
ld8 reg = [reg]

#define MOV_FROM_IHA(reg) \
movl reg = XSI_IHA; \
;; \
ld8 reg = [reg]

#define MOV_FROM_IPSR(pred, reg) \
(pred) movl reg = XSI_IPSR; \
;; \
(pred) ld8 reg = [reg]

#define MOV_FROM_IIM(reg) \
movl reg = XSI_IIM; \
;; \
ld8 reg = [reg]

#define MOV_FROM_IIP(reg) \
movl reg = XSI_IIP; \
;; \
ld8 reg = [reg]

.macro __MOV_FROM_IVR reg, clob
.ifc "\reg", "r8"
XEN_HYPER_GET_IVR
.exitm
.endif
.ifc "\clob", "r8"
XEN_HYPER_GET_IVR
;;
mov \reg = r8
.exitm
.endif

mov \clob = r8
;;
XEN_HYPER_GET_IVR
;;
mov \reg = r8
;;
mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob

.macro __MOV_FROM_PSR pred, reg, clob
.ifc "\reg", "r8"
(\pred) XEN_HYPER_GET_PSR;
.exitm
.endif
.ifc "\clob", "r8"
(\pred) XEN_HYPER_GET_PSR
;;
(\pred) mov \reg = r8
.exitm
.endif

(\pred) mov \clob = r8
(\pred) XEN_HYPER_GET_PSR
;;
(\pred) mov \reg = r8
(\pred) mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob

/* assuming ar.itc is read with interrupt disabled. */
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
(pred) movl clob = XSI_ITC_OFFSET; \
;; \
(pred) ld8 clob = [clob]; \
(pred) mov reg = ar.itc; \
;; \
(pred) add reg = reg, clob; \
;; \
(pred) movl clob = XSI_ITC_LAST; \
;; \
(pred) ld8 clob = [clob]; \
;; \
(pred) cmp.geu.unc pred_clob, p0 = clob, reg; \
;; \
(pred_clob) add reg = 1, clob; \
;; \
(pred) movl clob = XSI_ITC_LAST; \
;; \
(pred) st8 [clob] = reg


#define MOV_TO_IFA(reg, clob) \
movl clob = XSI_IFA; \
;; \
st8 [clob] = reg \

#define MOV_TO_ITIR(pred, reg, clob) \
(pred) movl clob = XSI_ITIR; \
;; \
(pred) st8 [clob] = reg

#define MOV_TO_IHA(pred, reg, clob) \
(pred) movl clob = XSI_IHA; \
;; \
(pred) st8 [clob] = reg

#define MOV_TO_IPSR(pred, reg, clob) \
(pred) movl clob = XSI_IPSR; \
;; \
(pred) st8 [clob] = reg; \
;;

#define MOV_TO_IFS(pred, reg, clob) \
(pred) movl clob = XSI_IFS; \
;; \
(pred) st8 [clob] = reg; \
;;

#define MOV_TO_IIP(reg, clob) \
movl clob = XSI_IIP; \
;; \
st8 [clob] = reg

.macro ____MOV_TO_KR kr, reg, clob0, clob1
.ifc "\clob0", "r9"
.error "clob0 \clob0 must not be r9"
.endif
.ifc "\clob1", "r8"
.error "clob1 \clob1 must not be r8"
.endif

.ifnc "\reg", "r9"
.ifnc "\clob1", "r9"
mov \clob1 = r9
.endif
mov r9 = \reg
.endif
.ifnc "\clob0", "r8"
mov \clob0 = r8
.endif
mov r8 = \kr
;;
XEN_HYPER_SET_KR

.ifnc "\reg", "r9"
.ifnc "\clob1", "r9"
mov r9 = \clob1
.endif
.endif
.ifnc "\clob0", "r8"
mov r8 = \clob0
.endif
.endm

.macro __MOV_TO_KR kr, reg, clob0, clob1
.ifc "\clob0", "r9"
____MOV_TO_KR \kr, \reg, \clob1, \clob0
.exitm
.endif
.ifc "\clob1", "r8"
____MOV_TO_KR \kr, \reg, \clob1, \clob0
.exitm
.endif

____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm

#define MOV_TO_KR(kr, reg, clob0, clob1) \
__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1


.macro __ITC_I pred, reg, clob
.ifc "\reg", "r8"
(\pred) XEN_HYPER_ITC_I
.exitm
.endif
.ifc "\clob", "r8"
(\pred) mov r8 = \reg
;;
(\pred) XEN_HYPER_ITC_I
.exitm
.endif

(\pred) mov \clob = r8
(\pred) mov r8 = \reg
;;
(\pred) XEN_HYPER_ITC_I
;;
(\pred) mov r8 = \clob
;;
.endm
#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob

.macro __ITC_D pred, reg, clob
.ifc "\reg", "r8"
(\pred) XEN_HYPER_ITC_D
;;
.exitm
.endif
.ifc "\clob", "r8"
(\pred) mov r8 = \reg
;;
(\pred) XEN_HYPER_ITC_D
;;
.exitm
.endif

(\pred) mov \clob = r8
(\pred) mov r8 = \reg
;;
(\pred) XEN_HYPER_ITC_D
;;
(\pred) mov r8 = \clob
;;
.endm
#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob

.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
.ifc "\reg", "r8"
(\pred_i)XEN_HYPER_ITC_I
;;
(\pred_d)XEN_HYPER_ITC_D
;;
.exitm
.endif
.ifc "\clob", "r8"
mov r8 = \reg
;;
(\pred_i)XEN_HYPER_ITC_I
;;
(\pred_d)XEN_HYPER_ITC_D
;;
.exitm
.endif

mov \clob = r8
mov r8 = \reg
;;
(\pred_i)XEN_HYPER_ITC_I
;;
(\pred_d)XEN_HYPER_ITC_D
;;
mov r8 = \clob
;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
__ITC_I_AND_D pred_i, pred_d, reg, clob

.macro __THASH pred, reg0, reg1, clob
.ifc "\reg0", "r8"
(\pred) mov r8 = \reg1
(\pred) XEN_HYPER_THASH
.exitm
.endc
.ifc "\reg1", "r8"
(\pred) XEN_HYPER_THASH
;;
(\pred) mov \reg0 = r8
;;
.exitm
.endif
.ifc "\clob", "r8"
(\pred) mov r8 = \reg1
(\pred) XEN_HYPER_THASH
;;
(\pred) mov \reg0 = r8
;;
.exitm
.endif

(\pred) mov \clob = r8
(\pred) mov r8 = \reg1
(\pred) XEN_HYPER_THASH
;;
(\pred) mov \reg0 = r8
(\pred) mov r8 = \clob
;;
.endm
#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob

#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
mov clob0 = 1; \
movl clob1 = XSI_PSR_IC; \
;; \
st4 [clob1] = clob0 \
;;

#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
;; \
srlz.d; \
mov clob1 = 1; \
movl clob0 = XSI_PSR_IC; \
;; \
st4 [clob0] = clob1

#define RSM_PSR_IC(clob) \
movl clob = XSI_PSR_IC; \
;; \
st4 [clob] = r0; \
;;

/* pred will be clobbered */
#define MASK_TO_PEND_OFS (-1)
#define SSM_PSR_I(pred, pred_clob, clob) \
(pred) movl clob = XSI_PSR_I_ADDR \
;; \
(pred) ld8 clob = [clob] \
;; \
/* if (pred) vpsr.i = 1 */ \
/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask)=0 */ \
(pred) st1 [clob] = r0, MASK_TO_PEND_OFS \
;; \
/* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
(pred) ld1 clob = [clob] \
;; \
(pred) cmp.ne.unc pred_clob, p0 = clob, r0 \
;; \
(pred_clob)XEN_HYPER_SSM_I /* do areal ssm psr.i */

#define RSM_PSR_I(pred, clob0, clob1) \
movl clob0 = XSI_PSR_I_ADDR; \
mov clob1 = 1; \
;; \
ld8 clob0 = [clob0]; \
;; \
(pred) st1 [clob0] = clob1

#define RSM_PSR_I_IC(clob0, clob1, clob2) \
movl clob0 = XSI_PSR_I_ADDR; \
movl clob1 = XSI_PSR_IC; \
;; \
ld8 clob0 = [clob0]; \
mov clob2 = 1; \
;; \
/* note: clears both vpsr.i and vpsr.ic! */ \
st1 [clob0] = clob2; \
st4 [clob1] = r0; \
;;

#define RSM_PSR_DT \
XEN_HYPER_RSM_PSR_DT

#define RSM_PSR_BE_I(clob0, clob1) \
RSM_PSR_I(p0, clob0, clob1); \
rum psr.be

#define SSM_PSR_DT_AND_SRLZ_I \
XEN_HYPER_SSM_PSR_DT

#define BSW_0(clob0, clob1, clob2) \
;; \
/* r16-r31 all now hold bank1 values */ \
mov clob2 = ar.unat; \
movl clob0 = XSI_BANK1_R16; \
movl clob1 = XSI_BANK1_R16 + 8; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
;; \
.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
;; \
mov clob1 = ar.unat; \
movl clob0 = XSI_B1NAT; \
;; \
st8 [clob0] = clob1; \
mov ar.unat = clob2; \
movl clob0 = XSI_BANKNUM; \
;; \
st4 [clob0] = r0


/* FIXME: THIS CODE IS NOT NaT SAFE! */
#define XEN_BSW_1(clob) \
mov clob = ar.unat; \
movl r30 = XSI_B1NAT; \
;; \
ld8 r30 = [r30]; \
mov r31 = 1; \
;; \
mov ar.unat = r30; \
movl r30 = XSI_BANKNUM; \
;; \
st4 [r30] = r31; \
movl r30 = XSI_BANK1_R16; \
movl r31 = XSI_BANK1_R16+8; \
;; \
ld8.fill r16 = [r30], 16; \
ld8.fill r17 = [r31], 16; \
;; \
ld8.fill r18 = [r30], 16; \
ld8.fill r19 = [r31], 16; \
;; \
ld8.fill r20 = [r30], 16; \
ld8.fill r21 = [r31], 16; \
;; \
ld8.fill r22 = [r30], 16; \
ld8.fill r23 = [r31], 16; \
;; \
ld8.fill r24 = [r30], 16; \
ld8.fill r25 = [r31], 16; \
;; \
ld8.fill r26 = [r30], 16; \
ld8.fill r27 = [r31], 16; \
;; \
ld8.fill r28 = [r30], 16; \
ld8.fill r29 = [r31], 16; \
;; \
ld8.fill r30 = [r30]; \
ld8.fill r31 = [r31]; \
;; \
mov ar.unat = clob

#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)


#define COVER \
XEN_HYPER_COVER

#define RFI \
XEN_HYPER_RFI; \
dv_serialize_data
@@ -1,363 +0,0 @@
/******************************************************************************
* arch-ia64/hypervisor-if.h
*
* Guest OS interface to IA64 Xen.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Copyright by those who contributed. (in alphabetical order)
*
* Anthony Xu <anthony.xu@intel.com>
* Eddie Dong <eddie.dong@intel.com>
* Fred Yang <fred.yang@intel.com>
* Kevin Tian <kevin.tian@intel.com>
* Alex Williamson <alex.williamson@hp.com>
* Chris Wright <chrisw@sous-sol.org>
* Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
* Dietmar Hahn <dietmar.hahn@fujitsu-siemens.com>
* Hollis Blanchard <hollisb@us.ibm.com>
* Isaku Yamahata <yamahata@valinux.co.jp>
* Jan Beulich <jbeulich@novell.com>
* John Levon <john.levon@sun.com>
* Kazuhiro Suzuki <kaz@jp.fujitsu.com>
* Keir Fraser <keir.fraser@citrix.com>
* Kouya Shimura <kouya@jp.fujitsu.com>
* Masaki Kanno <kanno.masaki@jp.fujitsu.com>
* Matt Chapman <matthewc@hp.com>
* Matthew Chapman <matthewc@hp.com>
* Samuel Thibault <samuel.thibault@eu.citrix.com>
* Tomonari Horikoshi <t.horikoshi@jp.fujitsu.com>
* Tristan Gingold <tgingold@free.fr>
* Tsunehisa Doi <Doi.Tsunehisa@jp.fujitsu.com>
* Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
* Zhang Xin <xing.z.zhang@intel.com>
* Zhang xiantao <xiantao.zhang@intel.com>
* dan.magenheimer@hp.com
* ian.pratt@cl.cam.ac.uk
* michael.fetterman@cl.cam.ac.uk
*/

#ifndef _ASM_IA64_XEN_INTERFACE_H
#define _ASM_IA64_XEN_INTERFACE_H

#define __DEFINE_GUEST_HANDLE(name, type) \
typedef struct { type *p; } __guest_handle_ ## name

#define DEFINE_GUEST_HANDLE_STRUCT(name) \
__DEFINE_GUEST_HANDLE(name, struct name)
#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
#define GUEST_HANDLE(name) __guest_handle_ ## name
#define GUEST_HANDLE_64(name) GUEST_HANDLE(name)
#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)

#ifndef __ASSEMBLY__
/* Explicitly size integers that represent pfns in the public interface
* with Xen so that we could have one ABI that works for 32 and 64 bit
* guests. */
typedef unsigned long xen_pfn_t;
typedef unsigned long xen_ulong_t;
/* Guest handles for primitive C types. */
__DEFINE_GUEST_HANDLE(uchar, unsigned char);
__DEFINE_GUEST_HANDLE(uint, unsigned int);
__DEFINE_GUEST_HANDLE(ulong, unsigned long);

DEFINE_GUEST_HANDLE(char);
DEFINE_GUEST_HANDLE(int);
DEFINE_GUEST_HANDLE(long);
DEFINE_GUEST_HANDLE(void);
DEFINE_GUEST_HANDLE(uint64_t);
DEFINE_GUEST_HANDLE(uint32_t);

DEFINE_GUEST_HANDLE(xen_pfn_t);
#define PRI_xen_pfn "lx"
#endif

/* Arch specific VIRQs definition */
#define VIRQ_ITC VIRQ_ARCH_0 /* V. Virtual itc timer */
#define VIRQ_MCA_CMC VIRQ_ARCH_1 /* MCA cmc interrupt */
#define VIRQ_MCA_CPE VIRQ_ARCH_2 /* MCA cpe interrupt */

/* Maximum number of virtual CPUs in multi-processor guests. */
/* keep sizeof(struct shared_page) <= PAGE_SIZE.
* this is checked in arch/ia64/xen/hypervisor.c. */
#define MAX_VIRT_CPUS 64

#ifndef __ASSEMBLY__

#define INVALID_MFN (~0UL)

union vac {
unsigned long value;
struct {
int a_int:1;
int a_from_int_cr:1;
int a_to_int_cr:1;
int a_from_psr:1;
int a_from_cpuid:1;
int a_cover:1;
int a_bsw:1;
long reserved:57;
};
};

union vdc {
unsigned long value;
struct {
int d_vmsw:1;
int d_extint:1;
int d_ibr_dbr:1;
int d_pmc:1;
int d_to_pmd:1;
int d_itm:1;
long reserved:58;
};
};

struct mapped_regs {
union vac vac;
union vdc vdc;
unsigned long virt_env_vaddr;
unsigned long reserved1[29];
unsigned long vhpi;
unsigned long reserved2[95];
union {
unsigned long vgr[16];
unsigned long bank1_regs[16]; /* bank1 regs (r16-r31)
when bank0 active */
};
union {
unsigned long vbgr[16];
unsigned long bank0_regs[16]; /* bank0 regs (r16-r31)
when bank1 active */
};
unsigned long vnat;
unsigned long vbnat;
unsigned long vcpuid[5];
unsigned long reserved3[11];
unsigned long vpsr;
unsigned long vpr;
unsigned long reserved4[76];
union {
unsigned long vcr[128];
struct {
unsigned long dcr; /* CR0 */
unsigned long itm;
unsigned long iva;
unsigned long rsv1[5];
unsigned long pta; /* CR8 */
unsigned long rsv2[7];
unsigned long ipsr; /* CR16 */
unsigned long isr;
unsigned long rsv3;
unsigned long iip;
unsigned long ifa;
unsigned long itir;
unsigned long iipa;
unsigned long ifs;
unsigned long iim; /* CR24 */
unsigned long iha;
unsigned long rsv4[38];
unsigned long lid; /* CR64 */
unsigned long ivr;
unsigned long tpr;
unsigned long eoi;
unsigned long irr[4];
unsigned long itv; /* CR72 */
unsigned long pmv;
unsigned long cmcv;
unsigned long rsv5[5];
unsigned long lrr0; /* CR80 */
unsigned long lrr1;
unsigned long rsv6[46];
};
};
union {
unsigned long reserved5[128];
struct {
unsigned long precover_ifs;
unsigned long unat; /* not sure if this is needed
until NaT arch is done */
int interrupt_collection_enabled; /* virtual psr.ic */

/* virtual interrupt deliverable flag is
* evtchn_upcall_mask in shared info area now.
* interrupt_mask_addr is the address
* of evtchn_upcall_mask for current vcpu
*/
unsigned char *interrupt_mask_addr;
int pending_interruption;
unsigned char vpsr_pp;
unsigned char vpsr_dfh;
unsigned char hpsr_dfh;
unsigned char hpsr_mfh;
unsigned long reserved5_1[4];
int metaphysical_mode; /* 1 = use metaphys mapping
0 = use virtual */
int banknum; /* 0 or 1, which virtual
register bank is active */
unsigned long rrs[8]; /* region registers */
unsigned long krs[8]; /* kernel registers */
unsigned long tmp[16]; /* temp registers
(e.g. for hyperprivops) */

/* itc paravirtualization
* vAR.ITC = mAR.ITC + itc_offset
* itc_last is one which was lastly passed to
* the guest OS in order to prevent it from
* going backwords.
*/
unsigned long itc_offset;
unsigned long itc_last;
};
};
};

struct arch_vcpu_info {
/* nothing */
};

/*
* This structure is used for magic page in domain pseudo physical address
* space and the result of XENMEM_machine_memory_map.
* As the XENMEM_machine_memory_map result,
* xen_memory_map::nr_entries indicates the size in bytes
* including struct xen_ia64_memmap_info. Not the number of entries.
*/
struct xen_ia64_memmap_info {
uint64_t efi_memmap_size; /* size of EFI memory map */
uint64_t efi_memdesc_size; /* size of an EFI memory map
* descriptor */
uint32_t efi_memdesc_version; /* memory descriptor version */
void *memdesc[0]; /* array of efi_memory_desc_t */
};

struct arch_shared_info {
/* PFN of the start_info page. */
unsigned long start_info_pfn;

/* Interrupt vector for event channel. */
int evtchn_vector;

/* PFN of memmap_info page */
unsigned int memmap_info_num_pages; /* currently only = 1 case is
supported. */
unsigned long memmap_info_pfn;

uint64_t pad[31];
};

struct xen_callback {
unsigned long ip;
};
typedef struct xen_callback xen_callback_t;

#endif /* !__ASSEMBLY__ */

#include <asm/pvclock-abi.h>

/* Size of the shared_info area (this is not related to page size). */
#define XSI_SHIFT 14
#define XSI_SIZE (1 << XSI_SHIFT)
/* Log size of mapped_regs area (64 KB - only 4KB is used). */
#define XMAPPEDREGS_SHIFT 12
#define XMAPPEDREGS_SIZE (1 << XMAPPEDREGS_SHIFT)
/* Offset of XASI (Xen arch shared info) wrt XSI_BASE. */
#define XMAPPEDREGS_OFS XSI_SIZE

/* Hyperprivops. */
#define HYPERPRIVOP_START 0x1
#define HYPERPRIVOP_RFI (HYPERPRIVOP_START + 0x0)
#define HYPERPRIVOP_RSM_DT (HYPERPRIVOP_START + 0x1)
#define HYPERPRIVOP_SSM_DT (HYPERPRIVOP_START + 0x2)
#define HYPERPRIVOP_COVER (HYPERPRIVOP_START + 0x3)
#define HYPERPRIVOP_ITC_D (HYPERPRIVOP_START + 0x4)
#define HYPERPRIVOP_ITC_I (HYPERPRIVOP_START + 0x5)
#define HYPERPRIVOP_SSM_I (HYPERPRIVOP_START + 0x6)
#define HYPERPRIVOP_GET_IVR (HYPERPRIVOP_START + 0x7)
#define HYPERPRIVOP_GET_TPR (HYPERPRIVOP_START + 0x8)
#define HYPERPRIVOP_SET_TPR (HYPERPRIVOP_START + 0x9)
#define HYPERPRIVOP_EOI (HYPERPRIVOP_START + 0xa)
#define HYPERPRIVOP_SET_ITM (HYPERPRIVOP_START + 0xb)
#define HYPERPRIVOP_THASH (HYPERPRIVOP_START + 0xc)
#define HYPERPRIVOP_PTC_GA (HYPERPRIVOP_START + 0xd)
#define HYPERPRIVOP_ITR_D (HYPERPRIVOP_START + 0xe)
#define HYPERPRIVOP_GET_RR (HYPERPRIVOP_START + 0xf)
#define HYPERPRIVOP_SET_RR (HYPERPRIVOP_START + 0x10)
#define HYPERPRIVOP_SET_KR (HYPERPRIVOP_START + 0x11)
#define HYPERPRIVOP_FC (HYPERPRIVOP_START + 0x12)
#define HYPERPRIVOP_GET_CPUID (HYPERPRIVOP_START + 0x13)
#define HYPERPRIVOP_GET_PMD (HYPERPRIVOP_START + 0x14)
#define HYPERPRIVOP_GET_EFLAG (HYPERPRIVOP_START + 0x15)
#define HYPERPRIVOP_SET_EFLAG (HYPERPRIVOP_START + 0x16)
#define HYPERPRIVOP_RSM_BE (HYPERPRIVOP_START + 0x17)
#define HYPERPRIVOP_GET_PSR (HYPERPRIVOP_START + 0x18)
#define HYPERPRIVOP_SET_RR0_TO_RR4 (HYPERPRIVOP_START + 0x19)
#define HYPERPRIVOP_MAX (0x1a)

/* Fast and light hypercalls. */
#define __HYPERVISOR_ia64_fast_eoi __HYPERVISOR_arch_1

/* Xencomm macros. */
#define XENCOMM_INLINE_MASK 0xf800000000000000UL
#define XENCOMM_INLINE_FLAG 0x8000000000000000UL

#ifndef __ASSEMBLY__

/*
* Optimization features.
* The hypervisor may do some special optimizations for guests. This hypercall
* can be used to switch on/of these special optimizations.
*/
#define __HYPERVISOR_opt_feature 0x700UL

#define XEN_IA64_OPTF_OFF 0x0
#define XEN_IA64_OPTF_ON 0x1

/*
* If this feature is switched on, the hypervisor inserts the
* tlb entries without calling the guests traphandler.
* This is useful in guests using region 7 for identity mapping
* like the linux kernel does.
*/
#define XEN_IA64_OPTF_IDENT_MAP_REG7 1

/* Identity mapping of region 4 addresses in HVM. */
#define XEN_IA64_OPTF_IDENT_MAP_REG4 2

/* Identity mapping of region 5 addresses in HVM. */
#define XEN_IA64_OPTF_IDENT_MAP_REG5 3

#define XEN_IA64_OPTF_IDENT_MAP_NOT_SET (0)

struct xen_ia64_opt_feature {
unsigned long cmd; /* Which feature */
unsigned char on; /* Switch feature on/off */
union {
struct {
/* The page protection bit mask of the pte.
* This will be or'ed with the pte. */
unsigned long pgprot;
unsigned long key; /* A protection key for itir.*/
};
};
};

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IA64_XEN_INTERFACE_H */
@@ -1,44 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/irq.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/

#ifndef _ASM_IA64_XEN_IRQ_H
#define _ASM_IA64_XEN_IRQ_H

/*
* The flat IRQ space is divided into two regions:
* 1. A one-to-one mapping of real physical IRQs. This space is only used
* if we have physical device-access privilege. This region is at the
* start of the IRQ space so that existing device drivers do not need
* to be modified to translate physical IRQ numbers into our IRQ space.
* 3. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
* are bound using the provided bind/unbind functions.
*/

#define XEN_PIRQ_BASE 0
#define XEN_NR_PIRQS 256

#define XEN_DYNIRQ_BASE (XEN_PIRQ_BASE + XEN_NR_PIRQS)
#define XEN_NR_DYNIRQS (NR_CPUS * 8)

#define XEN_NR_IRQS (XEN_NR_PIRQS + XEN_NR_DYNIRQS)

#endif /* _ASM_IA64_XEN_IRQ_H */
@@ -1,143 +0,0 @@
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */
#define XEN_ACCOUNT_GET_STAMP \
MOV_FROM_ITC(pUStk, p6, r20, r2);
#else
#define XEN_ACCOUNT_GET_STAMP
#endif

/*
* DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
* the minimum state necessary that allows us to turn psr.ic back
* on.
*
* Assumed state upon entry:
* psr.ic: off
* r31: contains saved predicates (pr)
*
* Upon exit, the state is as follows:
* psr.ic: off
* r2 = points to &pt_regs.r16
* r8 = contents of ar.ccv
* r9 = contents of ar.csd
* r10 = contents of ar.ssd
* r11 = FPSR_DEFAULT
* r12 = kernel sp (kernel virtual address)
* r13 = points to current task_struct (kernel virtual address)
* p15 = TRUE if psr.i is set in cr.ipsr
* predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
* preserved
* CONFIG_XEN note: p6/p7 are not preserved
*
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
#define XEN_DO_SAVE_MIN(__COVER,SAVE_IFS,EXTRA,WORKAROUND) \
mov r16=IA64_KR(CURRENT); /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
mov r25=ar.unat; /* M */ \
MOV_FROM_IPSR(p0,r29); /* M */ \
MOV_FROM_IIP(r28); /* M */ \
mov r21=ar.fpsr; /* M */ \
mov r26=ar.pfs; /* I */ \
__COVER; /* B;; (or nothing) */ \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
;; \
ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
/* switch from user to kernel RBS: */ \
;; \
invala; /* M */ \
/* SAVE_IFS;*/ /* see xen special handling below */ \
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
;; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
;; \
(pUStk) mov.m r24=ar.rnat; \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pKStk) mov r1=sp; /* get sp */ \
;; \
(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
;; \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=r29; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
mov r29=b0 \
;; \
WORKAROUND; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
movl r8=XSI_PRECOVER_IFS; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
/* xen special handling for possibly lazy cover */ \
/* SAVE_MIN case in dispatch_ia32_handler: mov r30=r0 */ \
ld8 r30=[r8]; \
(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
st8 [r16]=r28,16; /* save cr.iip */ \
;; \
st8 [r17]=r30,16; /* save cr.ifs */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r16]=r25,16; /* save ar.unat */ \
st8 [r17]=r26,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r16]=r27,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
st8 [r17]=r31,16; /* save predicates */ \
(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
;; \
st8 [r16]=r29,16; /* save b0 */ \
st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
mov r13=IA64_KR(CURRENT); /* establish `current' */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r14,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
XEN_ACCOUNT_GET_STAMP \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
EXTRA; \
movl r1=__gp; /* establish kernel global pointer */ \
;; \
ACCOUNT_SYS_ENTER \
BSW_1(r3,r14); /* switch back to bank 1 (must be last in insn group) */ \
;;
@@ -1,38 +0,0 @@
#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
#define _ASM_IA64_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
void *vstart = (void*)__get_free_pages(flags, get_order(size));
*dma_handle = virt_to_phys(vstart);
return vstart;
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
free_pages((unsigned long) cpu_addr, get_order(size));
}

static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) { }

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) { }

static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }

#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
@@ -1,65 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/page.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/

#ifndef _ASM_IA64_XEN_PAGE_H
#define _ASM_IA64_XEN_PAGE_H

#define INVALID_P2M_ENTRY (~0UL)

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
return mfn;
}

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
return pfn;
}

#define phys_to_machine_mapping_valid(_x) (1)

static inline void *mfn_to_virt(unsigned long mfn)
{
return __va(mfn << PAGE_SHIFT);
}

static inline unsigned long virt_to_mfn(void *virt)
{
return __pa(virt) >> PAGE_SHIFT;
}

/* for tpmfront.c */
static inline unsigned long virt_to_machine(void *virt)
{
return __pa(virt);
}

static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
/* nothing */
}

#define pte_mfn(_x) pte_pfn(_x)
#define mfn_pte(_x, _y) __pte_ma(0) /* unmodified use */
#define __pte_ma(_x) ((pte_t) {(_x)}) /* unmodified use */

#endif /* _ASM_IA64_XEN_PAGE_H */
@@ -1,38 +0,0 @@
/******************************************************************************
* arch/ia64/include/asm/xen/patchlist.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/

#define __paravirt_start_gate_fsyscall_patchlist \
__xen_start_gate_fsyscall_patchlist
#define __paravirt_end_gate_fsyscall_patchlist \
__xen_end_gate_fsyscall_patchlist
#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
__xen_start_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
__xen_end_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_start_gate_vtop_patchlist \
__xen_start_gate_vtop_patchlist
#define __paravirt_end_gate_vtop_patchlist \
__xen_end_gate_vtop_patchlist
#define __paravirt_start_gate_mckinley_e9_patchlist \
__xen_start_gate_mckinley_e9_patchlist
#define __paravirt_end_gate_mckinley_e9_patchlist \
__xen_end_gate_mckinley_e9_patchlist
@@ -1,135 +0,0 @@
#ifndef _ASM_IA64_XEN_PRIVOP_H
#define _ASM_IA64_XEN_PRIVOP_H

/*
 * Copyright (C) 2005 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 *
 * Paravirtualizations of privileged operations for Xen/ia64
 *
 *
 * inline privop and paravirt_alt support
 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>		/* arch-ia64.h requires uint64_t */
#endif
#include <asm/xen/interface.h>

/* At 1 MB, before per-cpu space but still addressable using addl instead
   of movl. */
#define XSI_BASE			0xfffffffffff00000

/* Address of mapped regs. */
#define XMAPPEDREGS_BASE		(XSI_BASE + XSI_SIZE)

#ifdef __ASSEMBLY__
#define XEN_HYPER_RFI			break HYPERPRIVOP_RFI
#define XEN_HYPER_RSM_PSR_DT		break HYPERPRIVOP_RSM_DT
#define XEN_HYPER_SSM_PSR_DT		break HYPERPRIVOP_SSM_DT
#define XEN_HYPER_COVER			break HYPERPRIVOP_COVER
#define XEN_HYPER_ITC_D			break HYPERPRIVOP_ITC_D
#define XEN_HYPER_ITC_I			break HYPERPRIVOP_ITC_I
#define XEN_HYPER_SSM_I			break HYPERPRIVOP_SSM_I
#define XEN_HYPER_GET_IVR		break HYPERPRIVOP_GET_IVR
#define XEN_HYPER_THASH			break HYPERPRIVOP_THASH
#define XEN_HYPER_ITR_D			break HYPERPRIVOP_ITR_D
#define XEN_HYPER_SET_KR		break HYPERPRIVOP_SET_KR
#define XEN_HYPER_GET_PSR		break HYPERPRIVOP_GET_PSR
#define XEN_HYPER_SET_RR0_TO_RR4	break HYPERPRIVOP_SET_RR0_TO_RR4

#define XSI_IFS				(XSI_BASE + XSI_IFS_OFS)
#define XSI_PRECOVER_IFS		(XSI_BASE + XSI_PRECOVER_IFS_OFS)
#define XSI_IFA				(XSI_BASE + XSI_IFA_OFS)
#define XSI_ISR				(XSI_BASE + XSI_ISR_OFS)
#define XSI_IIM				(XSI_BASE + XSI_IIM_OFS)
#define XSI_ITIR			(XSI_BASE + XSI_ITIR_OFS)
#define XSI_PSR_I_ADDR			(XSI_BASE + XSI_PSR_I_ADDR_OFS)
#define XSI_PSR_IC			(XSI_BASE + XSI_PSR_IC_OFS)
#define XSI_IPSR			(XSI_BASE + XSI_IPSR_OFS)
#define XSI_IIP				(XSI_BASE + XSI_IIP_OFS)
#define XSI_B1NAT			(XSI_BASE + XSI_B1NATS_OFS)
#define XSI_BANK1_R16			(XSI_BASE + XSI_BANK1_R16_OFS)
#define XSI_BANKNUM			(XSI_BASE + XSI_BANKNUM_OFS)
#define XSI_IHA				(XSI_BASE + XSI_IHA_OFS)
#define XSI_ITC_OFFSET			(XSI_BASE + XSI_ITC_OFFSET_OFS)
#define XSI_ITC_LAST			(XSI_BASE + XSI_ITC_LAST_OFS)
#endif

#ifndef __ASSEMBLY__

/************************************************/
/* Instructions paravirtualized for correctness */
/************************************************/

/* "fc" and "thash" are privilege-sensitive instructions, meaning they
 * may have different semantics depending on whether they are executed
 * at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
 * be allowed to execute directly, lest incorrect semantics result. */
extern void xen_fc(void *addr);
extern unsigned long xen_thash(unsigned long addr);

/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
 * is not currently used (though it may be in a long-format VHPT system!)
 * and the semantics of cover only change if psr.ic is off, which is very
 * rare (and currently non-existent outside of assembly code). */

/* There are also privilege-sensitive registers.  These registers are
 * readable at any privilege level but only writable at PL0. */
extern unsigned long xen_get_cpuid(int index);
extern unsigned long xen_get_pmd(int index);

#ifndef ASM_SUPPORTED
extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
#endif

/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/

/* Xen uses memory-mapped virtual privileged registers for access to many
 * performance-sensitive privileged registers.  Some, like the processor
 * status register (psr), are broken up into multiple memory locations.
 * Others, like "pend", are abstractions based on privileged registers.
 * "Pend" is guaranteed to be set if reading cr.ivr would return a
 * (non-spurious) interrupt. */
#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)

#define XSI_PSR_I			\
	(*XEN_MAPPEDREGS->interrupt_mask_addr)
#define xen_get_virtual_psr_i()		\
	(!XSI_PSR_I)
#define xen_set_virtual_psr_i(_val)	\
	({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
#define xen_set_virtual_psr_ic(_val)	\
	({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
#define xen_get_virtual_pend()		\
	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))

#ifndef ASM_SUPPORTED
/* Although all privileged operations can be left to trap and will
 * be properly handled by Xen, some are frequent enough that we use
 * hyperprivops for performance. */
extern unsigned long xen_get_psr(void);
extern unsigned long xen_get_ivr(void);
extern unsigned long xen_get_tpr(void);
extern void xen_hyper_ssm_i(void);
extern void xen_set_itm(unsigned long);
extern void xen_set_tpr(unsigned long);
extern void xen_eoi(unsigned long);
extern unsigned long xen_get_rr(unsigned long index);
extern void xen_set_rr(unsigned long index, unsigned long val);
extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
			       unsigned long val2, unsigned long val3,
			       unsigned long val4);
extern void xen_set_kr(unsigned long index, unsigned long val);
extern void xen_ptcga(unsigned long addr, unsigned long size);
#endif /* !ASM_SUPPORTED */

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_XEN_PRIVOP_H */
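The psr.i macros above were the heart of the paravirtualized interrupt model: the shared byte behind interrupt_mask_addr stores the inverse of psr.i (1 means interrupts masked), and the "pend" flag lives in the byte immediately before it. A small self-contained model of that inversion, using a mock structure rather than the real struct mapped_regs layout (the field names and their adjacency are assumptions for illustration):

#include <assert.h>
#include <stdint.h>

/* Mock of the shared-memory layout the macros assume: a pending flag
 * immediately followed by the interrupt-mask byte (assumed layout). */
struct mock_shared {
	uint8_t upcall_pending;		/* what xen_get_virtual_pend() would read */
	uint8_t upcall_mask;		/* what *interrupt_mask_addr would point at */
};

static struct mock_shared shared;
static uint8_t *interrupt_mask_addr = &shared.upcall_mask;

static int  get_virtual_psr_i(void)	{ return !*interrupt_mask_addr; }	/* mirrors xen_get_virtual_psr_i() */
static void set_virtual_psr_i(int val)	{ *interrupt_mask_addr = val ? 0 : 1; }	/* mirrors xen_set_virtual_psr_i() */
static uint8_t get_virtual_pend(void)	{ return *(interrupt_mask_addr - 1); }	/* mirrors xen_get_virtual_pend() */

int main(void)
{
	set_virtual_psr_i(0);			/* like "rsm psr.i": mask interrupts */
	assert(!get_virtual_psr_i() && *interrupt_mask_addr == 1);
	set_virtual_psr_i(1);			/* like "ssm psr.i": unmask */
	assert(get_virtual_psr_i());
	shared.upcall_pending = 1;		/* hypervisor flags a pending event */
	assert(get_virtual_pend());
	return 0;
}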
@@ -1,51 +0,0 @@
/*
 * Copyright (C) 2006 Tristan Gingold <tristan.gingold@bull.net>, Bull SAS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef _ASM_IA64_XEN_XCOM_HCALL_H
#define _ASM_IA64_XEN_XCOM_HCALL_H

/* These functions create an inline or mini descriptor for the parameters and
   call the corresponding xencomm_arch_hypercall_X.
   Architectures should define HYPERVISOR_xxx as xencomm_hypercall_xxx unless
   they want to use their own wrapper. */
extern int xencomm_hypercall_console_io(int cmd, int count, char *str);

extern int xencomm_hypercall_event_channel_op(int cmd, void *op);

extern int xencomm_hypercall_xen_version(int cmd, void *arg);

extern int xencomm_hypercall_physdev_op(int cmd, void *op);

extern int xencomm_hypercall_grant_table_op(unsigned int cmd, void *op,
					    unsigned int count);

extern int xencomm_hypercall_sched_op(int cmd, void *arg);

extern int xencomm_hypercall_multicall(void *call_list, int nr_calls);

extern int xencomm_hypercall_callback_op(int cmd, void *arg);

extern int xencomm_hypercall_memory_op(unsigned int cmd, void *arg);

extern int xencomm_hypercall_suspend(unsigned long srec);

extern long xencomm_hypercall_vcpu_op(int cmd, int cpu, void *arg);

extern long xencomm_hypercall_opt_feature(void *arg);

#endif /* _ASM_IA64_XEN_XCOM_HCALL_H */
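The "inline or mini descriptor" that comment refers to is the xencomm scheme: roughly, because the hypervisor cannot simply dereference guest virtual addresses on ia64, each hypercall argument buffer is described by a list of the physical addresses of the pages backing it. A simplified, self-contained sketch of that idea; the descriptor struct, page size, and mock virt-to-phys conversion are illustrative assumptions, not the real xencomm ABI:

#include <stdint.h>
#include <stdio.h>

#define MOCK_PAGE_SIZE	16384UL			/* assumed 16KB pages */
#define MAX_ADDRS	4

/* Simplified stand-in for a xencomm descriptor: a count plus the
 * physical address of each page that backs the argument buffer. */
struct mock_xencomm_desc {
	uint32_t nr_addrs;
	uint64_t address[MAX_ADDRS];
};

static uint64_t mock_virt_to_phys(const void *va)	/* assumption: identity-style mapping */
{
	return (uint64_t)(uintptr_t)va & 0x0000ffffffffffffUL;
}

/* Record one physical address per page spanned by [buf, buf + len). */
static int mock_xencomm_create(const void *buf, size_t len,
			       struct mock_xencomm_desc *desc)
{
	const char *p = buf;
	desc->nr_addrs = 0;
	while (len > 0) {
		size_t chunk = MOCK_PAGE_SIZE - ((uintptr_t)p & (MOCK_PAGE_SIZE - 1));
		if (chunk > len)
			chunk = len;
		if (desc->nr_addrs >= MAX_ADDRS)
			return -1;		/* would need a bigger descriptor */
		desc->address[desc->nr_addrs++] = mock_virt_to_phys(p);
		p += chunk;
		len -= chunk;
	}
	return 0;
}

int main(void)
{
	static char arg[3 * 16384];
	struct mock_xencomm_desc desc;
	if (mock_xencomm_create(arg, sizeof(arg), &desc) == 0)
		printf("described %u page(s)\n", (unsigned)desc.nr_addrs);
	return 0;
}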
@@ -1,42 +0,0 @@
/*
 * Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#ifndef _ASM_IA64_XEN_XENCOMM_H
#define _ASM_IA64_XEN_XENCOMM_H

#include <xen/xencomm.h>
#include <asm/pgtable.h>

/* Must be called before any hypercall.  */
extern void xencomm_initialize(void);
extern int xencomm_is_initialized(void);

/* Check if virtual contiguity means physical contiguity
 * where the passed address is a pointer value in virtual address.
 * On ia64, identity mapping area in region 7 or the piece of region 5
 * that is mapped by itr[IA64_TR_KERNEL]/dtr[IA64_TR_KERNEL]
 */
static inline int xencomm_is_phys_contiguous(unsigned long addr)
{
	return (PAGE_OFFSET <= addr &&
		addr < (PAGE_OFFSET + (1UL << IA64_MAX_PHYS_BITS))) ||
	       (KERNEL_START <= addr &&
		addr < KERNEL_START + KERNEL_TR_PAGE_SIZE);
}

#endif /* _ASM_IA64_XEN_XENCOMM_H */
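This predicate is what let a caller skip the descriptor entirely: addresses in the region-7 identity map or under the kernel translation register are physically contiguous and can be handed to the hypervisor "inline" as a single physical address, while anything else needs a per-page descriptor as sketched above. A self-contained mock of the same two-range check; every constant below is an illustrative assumption, not taken from the kernel headers:

#include <assert.h>
#include <stdint.h>

#define MOCK_PAGE_OFFSET		0xe000000000000000ULL	/* assumed region-7 identity-map base */
#define MOCK_MAX_PHYS_BITS		50			/* assumed */
#define MOCK_KERNEL_START		0xa000000100000000ULL	/* assumed region-5 kernel base */
#define MOCK_KERNEL_TR_PAGE_SIZE	(64ULL << 20)		/* assumed 64MB kernel TR page */

static int mock_is_phys_contiguous(uint64_t addr)
{
	return (MOCK_PAGE_OFFSET <= addr &&
		addr < (MOCK_PAGE_OFFSET + (1ULL << MOCK_MAX_PHYS_BITS))) ||
	       (MOCK_KERNEL_START <= addr &&
		addr < MOCK_KERNEL_START + MOCK_KERNEL_TR_PAGE_SIZE);
}

int main(void)
{
	/* Identity-mapped and TR-mapped addresses qualify; others would
	 * need a descriptor built page by page. */
	assert(mock_is_phys_contiguous(MOCK_PAGE_OFFSET + 0x1000));
	assert(mock_is_phys_contiguous(MOCK_KERNEL_START + 0x1000));
	assert(!mock_is_phys_contiguous(0x2000000000000000ULL));	/* e.g. a vmalloc-style address */
	return 0;
}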
@@ -20,13 +20,4 @@
 */
#define __IA64_BREAK_SYSCALL		0x100000

/*
 * Xen specific break numbers:
 */
#define __IA64_XEN_HYPERCALL		0x1000
/* [__IA64_XEN_HYPERPRIVOP_START, __IA64_XEN_HYPERPRIVOP_MAX] is used
   for xen hyperprivops */
#define __IA64_XEN_HYPERPRIVOP_START	0x1
#define __IA64_XEN_HYPERPRIVOP_MAX	0x1a

#endif /* _ASM_IA64_BREAK_H */
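For reference, these constants partitioned the ia64 break-immediate space: 0x100000 for native syscalls, 0x1000 for Xen hypercalls, and 0x1 through 0x1a for the hyperprivops emitted by the XEN_HYPER_* macros removed above. A small sketch of how a break handler could classify an immediate with these values; the enum and function names are illustrative, not kernel code:

#include <assert.h>

#define IA64_BREAK_SYSCALL		0x100000
#define IA64_XEN_HYPERCALL		0x1000
#define IA64_XEN_HYPERPRIVOP_START	0x1
#define IA64_XEN_HYPERPRIVOP_MAX	0x1a

enum break_kind { BREAK_SYSCALL, BREAK_XEN_HYPERCALL, BREAK_XEN_HYPERPRIVOP, BREAK_OTHER };

static enum break_kind classify_break(unsigned long imm)
{
	if (imm == IA64_BREAK_SYSCALL)
		return BREAK_SYSCALL;
	if (imm == IA64_XEN_HYPERCALL)
		return BREAK_XEN_HYPERCALL;
	if (imm >= IA64_XEN_HYPERPRIVOP_START && imm <= IA64_XEN_HYPERPRIVOP_MAX)
		return BREAK_XEN_HYPERPRIVOP;
	return BREAK_OTHER;
}

int main(void)
{
	assert(classify_break(0x100000) == BREAK_SYSCALL);
	assert(classify_break(0x1000) == BREAK_XEN_HYPERCALL);
	assert(classify_break(0x10) == BREAK_XEN_HYPERPRIVOP);
	return 0;
}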