| author | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2023-04-11 09:33:45 +0200 |
|---|---|---|
| committer | Samuel Thibault <samuel.thibault@ens-lyon.org> | 2023-04-11 09:33:45 +0200 |
| commit | 13e393e8d43c3509956ffee5cf27fc2311b649fd (patch) | |
| tree | 751622922deea1bec9001ff3b410ff6263c3a87d /i386 | |
| parent | b1cddee81d48a7fe2350d334aa756999fd265920 (diff) | |
| download | gnumach-13e393e8d43c3509956ffee5cf27fc2311b649fd.tar.gz, gnumach-13e393e8d43c3509956ffee5cf27fc2311b649fd.tar.bz2, gnumach-13e393e8d43c3509956ffee5cf27fc2311b649fd.zip | |
Fix Xen build
Diffstat (limited to 'i386')

    -rw-r--r--  i386/i386/xen.h          | 42
    -rw-r--r--  i386/i386at/model_dep.c  |  2
    -rw-r--r--  i386/intel/pmap.c        | 18

3 files changed, 32 insertions, 30 deletions
diff --git a/i386/i386/xen.h b/i386/i386/xen.h
index 8a17748a..2cd81be8 100644
--- a/i386/i386/xen.h
+++ b/i386/i386/xen.h
@@ -36,7 +36,7 @@
 #define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)":::"memory")
 #define rmb() mb()
 #define wmb() mb()
-MACH_INLINE unsigned long xchgl(volatile unsigned long *ptr, unsigned long x)
+static inline unsigned long xchgl(volatile unsigned long *ptr, unsigned long x)
 {
 	__asm__ __volatile__("xchg %0, %1"
 			     : "=r" (x)
@@ -66,7 +66,7 @@ MACH_INLINE unsigned long xchgl(volatile unsigned long *ptr, unsigned long x)
 
 /* x86-specific hypercall interface.  */
 #define _hypcall0(type, name) \
-MACH_INLINE type hyp_##name(void) \
+static inline type hyp_##name(void) \
 { \
 	unsigned long __ret; \
 	asm volatile ("call hypcalls+("TOSTR(__HYPERVISOR_##name)"*32)" \
@@ -76,7 +76,7 @@ MACH_INLINE type hyp_##name(void) \
 }
 
 #define _hypcall1(type, name, type1, arg1) \
-MACH_INLINE type hyp_##name(type1 arg1) \
+static inline type hyp_##name(type1 arg1) \
 { \
 	unsigned long __ret; \
 	register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
@@ -88,7 +88,7 @@ MACH_INLINE type hyp_##name(type1 arg1) \
 }
 
 #define _hypcall2(type, name, type1, arg1, type2, arg2) \
-MACH_INLINE type hyp_##name(type1 arg1, type2 arg2) \
+static inline type hyp_##name(type1 arg1, type2 arg2) \
 { \
 	unsigned long __ret; \
 	register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
@@ -102,7 +102,7 @@ MACH_INLINE type hyp_##name(type1 arg1, type2 arg2) \
 }
 
 #define _hypcall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
-MACH_INLINE type hyp_##name(type1 arg1, type2 arg2, type3 arg3) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3) \
 { \
 	unsigned long __ret; \
 	register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
@@ -118,7 +118,7 @@ MACH_INLINE type hyp_##name(type1 arg1, type2 arg2, type3 arg3) \
 }
 
 #define _hypcall4(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \
-MACH_INLINE type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
 { \
 	unsigned long __ret; \
 	register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
@@ -136,7 +136,7 @@ MACH_INLINE type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
 }
 
 #define _hypcall5(type, name, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5) \
-MACH_INLINE type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
+static inline type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
 { \
 	unsigned long __ret; \
 	register unsigned long __arg1 asm(_hypcall_arg1) = (unsigned long) arg1; \
@@ -165,7 +165,7 @@ _hypcall1(long, set_trap_table, vm_offset_t /* struct trap_info * */, traps);
 
 #ifdef MACH_PV_PAGETABLES
 _hypcall4(int, mmu_update, vm_offset_t /* struct mmu_update * */, req, int, count, vm_offset_t /* int * */, success_count, domid_t, domid)
-MACH_INLINE int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
+static inline int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
 {
 	struct mmu_update update =
 	{
@@ -221,7 +221,7 @@ _hypcall2(long, set_segment_base, int, reg, unsigned long, value);
 
 #include <xen/public/memory.h>
 _hypcall2(long, memory_op, unsigned long, cmd, vm_offset_t /* void * */, arg);
-MACH_INLINE void hyp_free_mfn(unsigned long mfn)
+static inline void hyp_free_mfn(unsigned long mfn)
 {
 	struct xen_memory_reservation reservation;
 	reservation.extent_start = (void*) kvtolin(&mfn);
@@ -245,7 +245,7 @@ _hypcall3(int, update_va_mapping, unsigned long, va, unsigned long, val, unsigne
 #define hyp_do_update_va_mapping(va, val, flags) hyp_update_va_mapping(va, val, flags)
 #endif
 
-MACH_INLINE void hyp_free_page(unsigned long pfn, void *va)
+static inline void hyp_free_page(unsigned long pfn, void *va)
 {
 	/* save mfn */
 	unsigned long mfn = pfn_to_mfn(pfn);
@@ -267,7 +267,7 @@ MACH_INLINE void hyp_free_page(unsigned long pfn, void *va)
 
 #ifdef MACH_PV_PAGETABLES
 _hypcall4(int, mmuext_op, vm_offset_t /* struct mmuext_op * */, op, int, count, vm_offset_t /* int * */, success_count, domid_t, domid);
-MACH_INLINE int hyp_mmuext_op_void(unsigned int cmd)
+static inline int hyp_mmuext_op_void(unsigned int cmd)
 {
 	struct mmuext_op op = {
 		.cmd = cmd,
@@ -276,7 +276,7 @@ MACH_INLINE int hyp_mmuext_op_void(unsigned int cmd)
 	hyp_mmuext_op(kv_to_la(&op), 1, kv_to_la(&count), DOMID_SELF);
 	return count;
 }
-MACH_INLINE int hyp_mmuext_op_mfn(unsigned int cmd, unsigned long mfn)
+static inline int hyp_mmuext_op_mfn(unsigned int cmd, unsigned long mfn)
 {
 	struct mmuext_op op = {
 		.cmd = cmd,
@@ -286,7 +286,7 @@ MACH_INLINE int hyp_mmuext_op_mfn(unsigned int cmd, unsigned long mfn)
 	hyp_mmuext_op(kv_to_la(&op), 1, kv_to_la(&count), DOMID_SELF);
 	return count;
 }
-MACH_INLINE void hyp_set_ldt(void *ldt, unsigned long nbentries) {
+static inline void hyp_set_ldt(void *ldt, unsigned long nbentries) {
 	struct mmuext_op op = {
 		.cmd = MMUEXT_SET_LDT,
 		.arg1.linear_addr = kvtolin(ldt),
@@ -303,7 +303,7 @@ MACH_INLINE void hyp_set_ldt(void *ldt, unsigned long nbentries) {
 }
 #define hyp_set_cr3(value) hyp_mmuext_op_mfn(MMUEXT_NEW_BASEPTR, pa_to_mfn(value))
 #define hyp_set_user_cr3(value) hyp_mmuext_op_mfn(MMUEXT_NEW_USER_BASEPTR, pa_to_mfn(value))
-MACH_INLINE void hyp_invlpg(vm_offset_t lin) {
+static inline void hyp_invlpg(vm_offset_t lin) {
 	struct mmuext_op ops;
 	int n;
 	ops.cmd = MMUEXT_INVLPG_ALL;
@@ -328,14 +328,14 @@ _hypcall1(long, set_timer_op, unsigned long, absolute);
 
 #include <xen/public/event_channel.h>
 _hypcall1(int, event_channel_op, vm_offset_t /* evtchn_op_t * */, op);
-MACH_INLINE int hyp_event_channel_send(evtchn_port_t port) {
+static inline int hyp_event_channel_send(evtchn_port_t port) {
 	evtchn_op_t op = {
 		.cmd = EVTCHNOP_send,
 		.u.send.port = port,
 	};
 	return hyp_event_channel_op(kvtolin(&op));
 }
-MACH_INLINE evtchn_port_t hyp_event_channel_alloc(domid_t domid) {
+static inline evtchn_port_t hyp_event_channel_alloc(domid_t domid) {
 	evtchn_op_t op = {
 		.cmd = EVTCHNOP_alloc_unbound,
 		.u.alloc_unbound.dom = DOMID_SELF,
@@ -345,7 +345,7 @@ MACH_INLINE evtchn_port_t hyp_event_channel_alloc(domid_t domid) {
 		panic("couldn't allocate event channel");
 	return op.u.alloc_unbound.port;
 }
-MACH_INLINE evtchn_port_t hyp_event_channel_bind_virq(uint32_t virq, uint32_t vcpu) {
+static inline evtchn_port_t hyp_event_channel_bind_virq(uint32_t virq, uint32_t vcpu) {
 	evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq, .u.bind_virq = { .virq = virq, .vcpu = vcpu }};
 	if (hyp_event_channel_op(kvtolin(&op)))
 		panic("can't bind virq %d\n",virq);
@@ -364,7 +364,7 @@ _hypcall0(long, iret);
 _hypcall2(long, sched_op, int, cmd, vm_offset_t /* void* */, arg)
 #define hyp_yield() hyp_sched_op(SCHEDOP_yield, 0)
 #define hyp_block() hyp_sched_op(SCHEDOP_block, 0)
-MACH_INLINE void __attribute__((noreturn)) hyp_crash(void)
+static inline void __attribute__((noreturn)) hyp_crash(void)
 {
 	unsigned int shut = SHUTDOWN_crash;
 	hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
@@ -373,7 +373,7 @@ MACH_INLINE void __attribute__((noreturn)) hyp_crash(void)
 	for(;;);
 }
 
-MACH_INLINE void __attribute__((noreturn)) hyp_halt(void)
+static inline void __attribute__((noreturn)) hyp_halt(void)
 {
 	unsigned int shut = SHUTDOWN_poweroff;
 	hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
@@ -382,7 +382,7 @@ MACH_INLINE void __attribute__((noreturn)) hyp_halt(void)
 	for(;;);
 }
 
-MACH_INLINE void __attribute__((noreturn)) hyp_reboot(void)
+static inline void __attribute__((noreturn)) hyp_reboot(void)
 {
 	unsigned int shut = SHUTDOWN_reboot;
 	hyp_sched_op(SCHEDOP_shutdown, kvtolin(&shut));
@@ -395,7 +395,7 @@ MACH_INLINE void __attribute__((noreturn)) hyp_reboot(void)
 _hypcall2(int, set_debugreg, int, reg, unsigned long, value);
 _hypcall1(unsigned long, get_debugreg, int, reg);
 /* x86-specific */
-MACH_INLINE uint64_t hyp_cpu_clock(void) {
+static inline uint64_t hyp_cpu_clock(void) {
 	uint32_t hi, lo;
 	asm volatile("rdtsc" : "=d"(hi), "=a"(lo));
 	return (((uint64_t) hi) << 32) | lo;
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index e8462ba3..f40a6169 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -600,8 +600,10 @@ startrtclock(void)
 	}
 #else
 	clkstart();
+#ifndef MACH_HYP
 	unmask_irq(0);
 #endif
+#endif
 }
 
 void
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 40f672b5..e867ed59 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -643,7 +643,12 @@ static void pmap_bootstrap_pae(void)
 #endif	/* PAE */
 
 #ifdef	MACH_PV_PAGETABLES
-static void pmap_bootstrap_xen(void)
+#ifdef PAE
+#define NSUP_L1 4
+#else
+#define NSUP_L1 1
+#endif
+static void pmap_bootstrap_xen(pt_entry_t *l1_map[NSUP_L1])
 {
 	/* We don't actually deal with the CR3 register content at all */
 	hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
@@ -654,12 +659,6 @@ static void pmap_bootstrap_xen(void)
 	 * other L1 table(s), thus 4MiB extra memory (resp. 8MiB), which is
 	 * enough for a pagetable mapping 4GiB.
 	 */
-#ifdef PAE
-#define NSUP_L1 4
-#else
-#define NSUP_L1 1
-#endif
-	pt_entry_t *l1_map[NSUP_L1];
 	vm_offset_t la;
 	int n_l1map;
 	for (n_l1map = 0, la = VM_MIN_KERNEL_ADDRESS; la >= VM_MIN_KERNEL_ADDRESS; la += NPTES * PAGE_SIZE) {
@@ -763,7 +762,8 @@ void pmap_bootstrap(void)
 #endif	/* PAE */
 
 #ifdef	MACH_PV_PAGETABLES
-	pmap_bootstrap_xen()
+	pt_entry_t *l1_map[NSUP_L1];
+	pmap_bootstrap_xen(l1_map);
 #endif /* MACH_PV_PAGETABLES */
 
 	/*
@@ -3138,13 +3138,13 @@ pmap_unmap_page_zero (void)
 void
 pmap_make_temporary_mapping(void)
 {
+	int i;
 	/*
 	 * We'll have to temporarily install a direct mapping
 	 * between physical memory and low linear memory,
 	 * until we start using our new kernel segment descriptors.
 	 */
 #if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
-	int i;
 	vm_offset_t delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
 	if ((vm_offset_t)(-delta) < delta)
 		delta = (vm_offset_t)(-delta);
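The bulk of the commit is the mechanical substitution of `MACH_INLINE` with `static inline` throughout the Xen-only header `i386/i386/xen.h`; a plausible reading of the diff is that the project-local `MACH_INLINE` macro was retired elsewhere in the tree and these Xen files were left behind, breaking the Xen build. For readers unfamiliar with the `_hypcallN` layer being edited, here is a minimal, self-contained sketch of the pattern — not gnumach code: the `HYP_*` numbers and the `fake_hypcall` trampoline are invented for illustration, whereas the real wrappers use inline assembly to jump into the hypervisor call page (`call hypcalls+(N*32)`, as visible in the hunks above).

```c
#include <stdio.h>

/* Hypothetical hypercall numbers, for illustration only. */
#define HYP_yield 0
#define HYP_block 1

/* Stand-in for the real hypercall trampoline. */
static long fake_hypcall(int nr, unsigned long arg)
{
	printf("hypercall %d(%lu)\n", nr, arg);
	return 0;
}

/* Same shape as the header's _hypcall1: stamp out one wrapper per
 * hypercall.  `static inline` is plain ISO C, so every translation
 * unit that includes the header gets its own private copy of each
 * wrapper and needs no prior project-specific macro definition. */
#define _hypcall1(type, name, type1, arg1) \
static inline type hyp_##name(type1 arg1) \
{ \
	return (type) fake_hypcall(HYP_##name, (unsigned long) (arg1)); \
}

_hypcall1(long, yield, unsigned long, dummy)
_hypcall1(long, block, unsigned long, dummy)

int main(void)
{
	hyp_yield(0);
	hyp_block(0);
	return 0;
}
```

The two remaining hunks are genuine fixes rather than renames: `model_dep.c` stops calling `unmask_irq(0)` under `MACH_HYP` (a Xen guest has no PIC IRQ 0 to unmask), and `pmap.c` repairs the bare `pmap_bootstrap_xen()` call — previously missing its semicolon — by hoisting `NSUP_L1` out of the function and having the caller pass the `l1_map` array in.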