arch/x86: merge include/arch/x86/asm_inline.h into arch.h

This file is only included from arch.h, so merge it into that header. This
also avoids confusion with the files of the same name in arch/x86/include/.

Signed-off-by: Charles E. Youse <charles.youse@intel.com>
Commit ef4eb300bf by Charles E. Youse, 2019-06-27 16:44:07 -07:00, committed by Anas Nashif
2 changed files with 100 additions and 133 deletions
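For context, a minimal usage sketch (not part of this commit): the header being merged says "include kernel.h instead" of including it directly, so application code reaches these helpers only through the public irq_lock()/irq_unlock() API and is unaffected by the move. The bump_counter() function and shared_counter variable below are hypothetical; irq_lock()/irq_unlock() are the standard Zephyr calls whose x86 internals appear in the diff that follows.

#include <kernel.h>

static volatile unsigned int shared_counter;    /* hypothetical shared data */

void bump_counter(void)
{
	/* irq_lock() bottoms out in the arch-level lock helper below and
	 * returns the saved interrupt state as the lock-out key.
	 */
	unsigned int key = irq_lock();

	shared_counter++;               /* critical section */

	irq_unlock(key);                /* restore the saved interrupt state */
}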

include/arch/x86/asm_inline.h (deleted)

@@ -1,132 +0,0 @@
/*
* Copyright (c) 2015, Wind River Systems, Inc.
* Copyright (c) 2019, Intel Corp.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef ZEPHYR_INCLUDE_ARCH_X86_ASM_INLINE_H_
#define ZEPHYR_INCLUDE_ARCH_X86_ASM_INLINE_H_
/*
* The file must not be included directly
* Include kernel.h instead
*/
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _ASMLANGUAGE
#include <zephyr/types.h>
#include <stddef.h>
/**
*
* @internal
*
* @brief Disable all interrupts on the CPU
*
* GCC assembly internals of irq_lock(). See irq_lock() for a complete
* description.
*
* @return An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*/
static ALWAYS_INLINE unsigned int _do_irq_lock(void)
{
unsigned int key;
__asm__ volatile (
"pushfl;\n\t"
"cli;\n\t"
"popl %0;\n\t"
: "=g" (key)
:
: "memory"
);
return key;
}
/**
*
* @internal
*
* @brief Enable all interrupts on the CPU (inline)
*
* GCC assembly internals of irq_lock_unlock(). See irq_lock_unlock() for a
* complete description.
*
* @return N/A
*/
static ALWAYS_INLINE void z_do_irq_unlock(void)
{
__asm__ volatile (
"sti;\n\t"
: : : "memory"
);
}
/**
* @brief read timestamp register ensuring serialization
*/
static inline u64_t z_tsc_read(void)
{
union {
struct {
u32_t lo;
u32_t hi;
};
u64_t value;
} rv;
/* rdtsc & cpuid clobbers eax, ebx, ecx and edx registers */
__asm__ volatile (/* serialize */
"xorl %%eax,%%eax;\n\t"
"cpuid;\n\t"
:
:
: "%eax", "%ebx", "%ecx", "%edx"
);
/*
* We cannot use "=A", since this would use %rax on x86_64 and
* return only the lower 32bits of the TSC
*/
__asm__ volatile ("rdtsc" : "=a" (rv.lo), "=d" (rv.hi));
return rv.value;
}
/**
*
* @brief Get a 32 bit CPU timestamp counter
*
* @return a 32-bit number
*/
static ALWAYS_INLINE
u32_t z_do_read_cpu_timestamp32(void)
{
u32_t rv;
__asm__ volatile("rdtsc" : "=a"(rv) : : "%edx");
return rv;
}
#endif /* _ASMLANGUAGE */
#ifdef __cplusplus
}
#endif
#endif /* ZEPHYR_INCLUDE_ARCH_X86_ASM_INLINE_H_ */
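As a usage note (a sketch under assumptions, not code from this commit): z_tsc_read() above serializes with cpuid before rdtsc, so it is suitable for timing short code regions without skew from out-of-order execution. The measure_cycles() and work_under_test() names below are hypothetical.

extern void work_under_test(void);   /* hypothetical workload being timed */

static u64_t measure_cycles(void)
{
	u64_t start = z_tsc_read();      /* serialized 64-bit TSC read */

	work_under_test();

	return z_tsc_read() - start;     /* elapsed TSC cycles */
}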

include/arch/x86/arch.h

@@ -24,7 +24,6 @@
#include <arch/common/ffs.h>
#ifndef _ASMLANGUAGE
#include <arch/x86/asm_inline.h>
#include <arch/common/addr_types.h>
#include <arch/x86/ia32/segmentation.h>
#endif
@@ -366,6 +365,106 @@ typedef struct nanoIsf {
#ifndef _ASMLANGUAGE
/**
*
* @internal
*
* @brief Disable all interrupts on the CPU
*
* GCC assembly internals of irq_lock(). See irq_lock() for a complete
* description.
*
* @return An architecture-dependent lock-out key representing the
* "interrupt disable state" prior to the call.
*/
static ALWAYS_INLINE unsigned int _do_irq_lock(void)
{
unsigned int key;
__asm__ volatile (
"pushfl;\n\t"
"cli;\n\t"
"popl %0;\n\t"
: "=g" (key)
:
: "memory"
);
return key;
}
/**
*
* @internal
*
* @brief Enable all interrupts on the CPU (inline)
*
* GCC assembly internals of irq_lock_unlock(). See irq_lock_unlock() for a
* complete description.
*
* @return N/A
*/
static ALWAYS_INLINE void z_do_irq_unlock(void)
{
__asm__ volatile (
"sti;\n\t"
: : : "memory"
);
}
/**
* @brief read timestamp register ensuring serialization
*/
static inline u64_t z_tsc_read(void)
{
union {
struct {
u32_t lo;
u32_t hi;
};
u64_t value;
} rv;
/* rdtsc & cpuid clobbers eax, ebx, ecx and edx registers */
__asm__ volatile (/* serialize */
"xorl %%eax,%%eax;\n\t"
"cpuid;\n\t"
:
:
: "%eax", "%ebx", "%ecx", "%edx"
);
/*
* We cannot use "=A", since this would use %rax on x86_64 and
* return only the lower 32bits of the TSC
*/
__asm__ volatile ("rdtsc" : "=a" (rv.lo), "=d" (rv.hi));
return rv.value;
}
/**
*
* @brief Get a 32 bit CPU timestamp counter
*
* @return a 32-bit number
*/
static ALWAYS_INLINE
u32_t z_do_read_cpu_timestamp32(void)
{
u32_t rv;
__asm__ volatile("rdtsc" : "=a"(rv) : : "%edx");
return rv;
}
/**
* @brief Disable all interrupts on the CPU (inline)
*