--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * (C) 2014 Karim Allah Ahmed <karim.allah.ahmed@gmail.com>
+ * (C) 2020, EPAM Systems Inc.
+ */
+#ifndef _ASM_ARM_XEN_SYSTEM_H
+#define _ASM_ARM_XEN_SYSTEM_H
+
+#include <compiler.h>
+#include <asm/bitops.h>
+
+/* If *ptr == old, then store new there (and return new).
+ * Otherwise, return the old value.
+ * Atomic.
+ */
+#define synch_cmpxchg(ptr, old, new) \
+({ __typeof__(*(ptr)) stored = (old); \
+ __atomic_compare_exchange_n((ptr), &stored, (new), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ? (new) : (old); \
+})
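+
+/*
+ * Illustrative use: claim a 0 -> 1 slot; getting @new (here 1) back
+ * means the exchange actually happened:
+ *
+ *   if (synch_cmpxchg(&slot, 0, 1) == 1)
+ *           the slot is ours
+ */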
+
+/* As test_and_clear_bit, but using __ATOMIC_SEQ_CST */
+static inline int synch_test_and_clear_bit(int nr, volatile void *addr)
+{
+ u8 *byte = ((u8 *)addr) + (nr >> 3);
+ u8 bit = 1 << (nr & 7);
+ u8 orig;
+
+ orig = __atomic_fetch_and(byte, ~bit, __ATOMIC_SEQ_CST);
+
+ return (orig & bit) != 0;
+}
+
+/* As test_and_set_bit, but using __ATOMIC_SEQ_CST */
+static inline int synch_test_and_set_bit(int nr, volatile void *base)
+{
+ u8 *byte = ((u8 *)base) + (nr >> 3);
+ u8 bit = 1 << (nr & 7);
+ u8 orig;
+
+ orig = __atomic_fetch_or(byte, bit, __ATOMIC_SEQ_CST);
+
+ return (orig & bit) != 0;
+}
+
+/* As set_bit, but using __ATOMIC_SEQ_CST */
+static inline void synch_set_bit(int nr, volatile void *addr)
+{
+ synch_test_and_set_bit(nr, addr);
+}
+
+/* As clear_bit, but using __ATOMIC_SEQ_CST */
+static inline void synch_clear_bit(int nr, volatile void *addr)
+{
+ synch_test_and_clear_bit(nr, addr);
+}
+
+/* As test_bit, but with a following compiler barrier. */
+static inline int synch_test_bit(int nr, const void *addr)
+{
+ int result;
+
+ result = test_bit(nr, addr);
+ barrier();
+ return result;
+}
+
+#define xchg(ptr, v) __atomic_exchange_n(ptr, v, __ATOMIC_SEQ_CST)
+
+#define xen_mb() mb()
+#define xen_rmb() rmb()
+#define xen_wmb() wmb()
+
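+/*
+ * Addresses are converted with identity casts: these helpers assume
+ * a 1:1 virtual/physical mapping.
+ */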
+#define to_phys(x) ((unsigned long)(x))
+#define to_virt(x) ((void *)(x))
+
+#define PFN_UP(x) (unsigned long)(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) (unsigned long)((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((unsigned long)(x) << PAGE_SHIFT)
+#define PHYS_PFN(x) (unsigned long)((x) >> PAGE_SHIFT)
+
+#define virt_to_pfn(_virt) (PFN_DOWN(to_phys(_virt)))
+#define virt_to_mfn(_virt) (PFN_DOWN(to_phys(_virt)))
+#define mfn_to_virt(_mfn) (to_virt(PFN_PHYS(_mfn)))
+#define pfn_to_virt(_pfn) (to_virt(PFN_PHYS(_pfn)))
+
+#endif /* _ASM_ARM_XEN_SYSTEM_H */
--- /dev/null
+// SPDX-License-Identifier: MIT
+/*
+ * hypervisor.c
+ *
+ * Communication to/from hypervisor.
+ *
+ * Copyright (c) 2002-2003, K A Fraser
+ * Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk, Intel Research Cambridge
+ * Copyright (c) 2020, EPAM Systems Inc.
+ */
+#include <common.h>
+#include <cpu_func.h>
+#include <log.h>
+#include <memalign.h>
+
+#include <asm/io.h>
+#include <asm/armv8/mmu.h>
+#include <asm/xen/system.h>
+
+#include <linux/bug.h>
+
+#include <xen/hvm.h>
+#include <xen/interface/memory.h>
+
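+/* Event channels in word @idx that are pending and not masked */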
+#define active_evtchns(cpu, sh, idx) \
+ ((sh)->evtchn_pending[idx] & \
+ ~(sh)->evtchn_mask[idx])
+
+int in_callback;
+
+/*
+ * Shared page for communicating with the hypervisor.
+ * Event flags go here, for example.
+ */
+struct shared_info *HYPERVISOR_shared_info;
+
+static const char *param_name(int op)
+{
+#define PARAM(x) [HVM_PARAM_##x] = #x
+ static const char *const names[] = {
+ PARAM(CALLBACK_IRQ),
+ PARAM(STORE_PFN),
+ PARAM(STORE_EVTCHN),
+ PARAM(PAE_ENABLED),
+ PARAM(IOREQ_PFN),
+ PARAM(VPT_ALIGN),
+ PARAM(CONSOLE_PFN),
+ PARAM(CONSOLE_EVTCHN),
+ };
+#undef PARAM
+
+ if (op >= ARRAY_SIZE(names))
+ return "unknown";
+
+ if (!names[op])
+ return "reserved";
+
+ return names[op];
+}
+
+/**
+ * hvm_get_parameter_maintain_dcache() - obtain an HVM parameter value
+ * with manual data cache maintenance.
+ * @idx: HVM parameter index
+ * @value: Value to fill in
+ *
+ * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
+ * all memory which is shared with other entities in the system
+ * (including the hypervisor and other guests) must reside in memory
+ * which is mapped as Normal Inner Write-Back Outer Write-Back
+ * Inner-Shareable.
+ *
+ * Thus, page attributes must be the same for every entity working
+ * with that page.
+ *
+ * Before the MMU is set up, U-Boot runs with the data cache turned
+ * off while Xen still accesses the shared memory as cacheable, so
+ * manual data cache maintenance is required to bridge that
+ * difference in page attributes.
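+ *
+ * Illustrative use, e.g. reading the console ring PFN before the MMU
+ * and data cache are set up:
+ *
+ *   uint64_t pfn;
+ *
+ *   hvm_get_parameter_maintain_dcache(HVM_PARAM_CONSOLE_PFN, &pfn);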
+ */
+int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value)
+{
+ struct xen_hvm_param xhv;
+ int ret;
+
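+ /*
+ * The guest may still run with the data cache off while Xen accesses
+ * @xhv as cacheable memory (see the note above), so make sure no
+ * stale cache lines cover the structure around the hypercall.
+ */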
+ invalidate_dcache_range((unsigned long)&xhv,
+ (unsigned long)&xhv + sizeof(xhv));
+ xhv.domid = DOMID_SELF;
+ xhv.index = idx;
+ invalidate_dcache_range((unsigned long)&xhv,
+ (unsigned long)&xhv + sizeof(xhv));
+
+ ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
+ if (ret < 0) {
+ pr_err("Cannot get hvm parameter %s (%d): %d!\n",
+ param_name(idx), idx, ret);
+ BUG();
+ }
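+ /* Read back the value written by Xen, not a stale cache line */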
+ invalidate_dcache_range((unsigned long)&xhv,
+ (unsigned long)&xhv + sizeof(xhv));
+
+ *value = xhv.value;
+
+ return ret;
+}
+
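+/*
+ * hvm_get_parameter() - obtain an HVM parameter value.
+ *
+ * The same as hvm_get_parameter_maintain_dcache() but without the
+ * manual cache maintenance, for use once the data cache is enabled.
+ */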
+int hvm_get_parameter(int idx, uint64_t *value)
+{
+ struct xen_hvm_param xhv;
+ int ret;
+
+ xhv.domid = DOMID_SELF;
+ xhv.index = idx;
+ ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
+ if (ret < 0) {
+ pr_err("Cannot get hvm parameter %s (%d): %d!\n",
+ param_name(idx), idx, ret);
+ BUG();
+ }
+
+ *value = xhv.value;
+
+ return ret;
+}
+
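+/*
+ * map_shared_info() - map the Xen shared info page.
+ *
+ * Allocate one page-aligned page of guest memory and ask Xen, via
+ * XENMEM_add_to_physmap with XENMAPSPACE_shared_info, to place the
+ * shared info page at that guest frame.
+ */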
+struct shared_info *map_shared_info(void *p)
+{
+ struct xen_add_to_physmap xatp;
+
+ HYPERVISOR_shared_info = (struct shared_info *)memalign(PAGE_SIZE,
+ PAGE_SIZE);
+ if (!HYPERVISOR_shared_info)
+ BUG();
+
+ xatp.domid = DOMID_SELF;
+ xatp.idx = 0;
+ xatp.space = XENMAPSPACE_shared_info;
+ xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
+ if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
+ BUG();
+
+ return HYPERVISOR_shared_info;
+}
+
+void do_hypervisor_callback(struct pt_regs *regs)
+{
+ unsigned long l1, l2, l1i, l2i;
+ unsigned int port;
+ int cpu = 0;
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = &s->vcpu_info[cpu];
+
+ in_callback = 1;
+
+ vcpu_info->evtchn_upcall_pending = 0;
+ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+
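+ /*
+ * Two-level scan: each bit set in the per-vCPU selector word (l1)
+ * points at a word of evtchn_pending[]; each pending and unmasked
+ * bit in that word (l2) identifies one event channel port.
+ */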
+ while (l1 != 0) {
+ l1i = __ffs(l1);
+ l1 &= ~(1UL << l1i);
+
+ while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
+ l2i = __ffs(l2);
+ l2 &= ~(1UL << l2i);
+
+ port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
+ /* TODO: handle new event: do_event(port, regs); */
+ /* Suppress -Wunused-but-set-variable */
+ (void)(port);
+ }
+ }
+
+ in_callback = 0;
+}
+
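+/*
+ * force_evtchn_callback() - synchronously process pending events.
+ *
+ * Keep invoking the callback handler while the per-vCPU upcall flag
+ * is set, masking PV upcalls around each pass when the ABI provides
+ * an upcall mask.
+ */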
+void force_evtchn_callback(void)
+{
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+ int save;
+#endif
+ struct vcpu_info *vcpu;
+
+ vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+ save = vcpu->evtchn_upcall_mask;
+#endif
+
+ while (vcpu->evtchn_upcall_pending) {
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+ vcpu->evtchn_upcall_mask = 1;
+#endif
+ do_hypervisor_callback(NULL);
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+ vcpu->evtchn_upcall_mask = save;
+#endif
+ }
+}
+
+void mask_evtchn(uint32_t port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+
+ synch_set_bit(port, &s->evtchn_mask[0]);
+}
+
+void unmask_evtchn(uint32_t port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+ struct vcpu_info *vcpu_info = &s->vcpu_info[smp_processor_id()];
+
+ synch_clear_bit(port, &s->evtchn_mask[0]);
+
+ /*
+ * Just like a real IO-APIC we 'lose the interrupt edge' if the
+ * channel is masked.
+ */
+ if (synch_test_bit(port, &s->evtchn_pending[0]) &&
+ !synch_test_and_set_bit(port / (sizeof(unsigned long) * 8),
+ &vcpu_info->evtchn_pending_sel)) {
+ vcpu_info->evtchn_upcall_pending = 1;
+#ifdef XEN_HAVE_PV_UPCALL_MASK
+ if (!vcpu_info->evtchn_upcall_mask)
+#endif
+ force_evtchn_callback();
+ }
+}
+
+void clear_evtchn(uint32_t port)
+{
+ struct shared_info *s = HYPERVISOR_shared_info;
+
+ synch_clear_bit(port, &s->evtchn_pending[0]);
+}
+
+void xen_init(void)
+{
+ debug("%s\n", __func__);
+
+ map_shared_info(NULL);
+}
+