Compare commits

..

4 Commits

Author SHA1 Message Date
270dd28b51 Merge branch 'development' 2018-11-08 17:28:28 +09:00
bfff009f7c configure.ac: Update version number to 1.6.0
Change-Id: I91148618c8db8035c8a2f11a20898df48607ad1f
2018-11-08 10:54:58 +09:00
a1fef219ad Merge tag '1.6.0-rc1' into master-1.6.0
Release target Nov 11, 2018

Conflicts:
	configure
2018-11-08 10:49:38 +09:00
14d819eff4 configure.ac: Update version number
Change-Id: Ia497306551aa103d80eb5a307ca7196940ea7e14
2018-07-06 18:28:26 +09:00
1125 changed files with 19395 additions and 61895 deletions

30
.gitignore vendored
View File

@ -9,27 +9,9 @@
Module.symvers
*.order
.tmp_versions
old_timestamp
CMakeFiles
CMakeCache.txt
Makefile
Kbuild
cmake_install.cmake
config.h
mcstop+release.sh
mcreboot.sh
mcreboot.1
mcoverlay-destroy.sh
mcoverlay-create.sh
kernel/mckernel.img
kernel/include/swapfmt.h
executer/user/vmcore2mckdump
executer/user/ql_talker
executer/user/mcexec.1
executer/user/mcexec
executer/user/libsched_yield.so.1.0.0
executer/user/libsched_yield.so
executer/user/libmcexec.a
executer/user/libldump2mcdump.so
executer/user/eclair
tools/mcstat/mcstat
elfboot/elfboot
elfboot/elfboot_test
linux/executer/mcexec
linux/mod_test*
linux/target

View File

@ -1,192 +0,0 @@
cmake_minimum_required(VERSION 2.6)

# Default to a Debug build when the user did not request one.  FORCE is
# required because CMAKE_BUILD_TYPE already exists in the cache (empty).
if (NOT CMAKE_BUILD_TYPE)
	set(CMAKE_BUILD_TYPE "Debug" CACHE STRING "Build type: Debug Release..." FORCE)
endif ()

# project() must run before any language machinery; listing C and ASM here
# enables both languages, so the former separate enable_language(C ASM)
# call (which preceded project(), an invalid order) is redundant.
project(mckernel C ASM)

set(MCKERNEL_VERSION "1.6.0")
set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)

# for rpmbuild: honour the sysconfdir handed in by the spec file
if (DEFINED SYSCONF_INSTALL_DIR)
	set(CMAKE_INSTALL_SYSCONFDIR "${SYSCONF_INSTALL_DIR}")
endif ()

include(GNUInstallDirs)
include(CMakeParseArguments)
include(Kbuild)
include(Ksym)
include(CheckCCompilerFlag)

# Warning set shared by the Debug and Release C flags below.
set(CFLAGS_WARNINGS "-Wall -Wextra -Wno-unused-parameter -Wno-sign-compare -Wno-unused-function")
# -Wno-implicit-fallthrough only exists on newer compilers; probe for it.
check_c_compiler_flag(-Wno-implicit-fallthrough IMPLICIT_FALLTHROUGH)
if (IMPLICIT_FALLTHROUGH)
	set(CFLAGS_WARNINGS "${CFLAGS_WARNINGS} -Wno-implicit-fallthrough")
endif ()

set(CMAKE_C_FLAGS_DEBUG "-g ${CFLAGS_WARNINGS}" CACHE STRING "Debug compiler flags")
set(CMAKE_C_FLAGS_RELEASE "${CFLAGS_WARNINGS}" CACHE STRING "Release compiler flags")
# build options
option(ENABLE_WERROR "Enable -Werror" OFF)
if (ENABLE_WERROR)
	add_compile_options("-Werror")
endif ()

# Pick a default build target from the host architecture; the user can
# still override BUILD_TARGET on the command line (it is a cache entry).
if ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64")
	set(BUILD_TARGET "smp-x86" CACHE STRING "Build target: smp-x86 | smp-arm64")
elseif ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "aarch64")
	set(BUILD_TARGET "smp-arm64" CACHE STRING "Build target: smp-x86 | smp-arm64")
endif ()

if ("${BUILD_TARGET}" STREQUAL "smp-x86")
	set(ARCH "x86_64")
elseif ("${BUILD_TARGET}" STREQUAL "smp-arm64")
	set(ARCH "arm64")
	# The arm64 port is staged behind numbered POSTK debug/fix macros;
	# enable all of them (1..120) for both host-side and Kbuild objects.
	foreach(i RANGE 1 120)
		add_definitions(-DPOSTK_DEBUG_ARCH_DEP_${i} -DPOSTK_DEBUG_TEMP_FIX_${i})
		set(KBUILD_C_FLAGS "${KBUILD_C_FLAGS} -DPOSTK_DEBUG_ARCH_DEP_${i} -DPOSTK_DEBUG_TEMP_FIX_${i}")
	endforeach()
	add_definitions(-DCONFIG_ARM64_64K_PAGES -DCONFIG_ARM64_VA_BITS=48)
endif ()

# Offer the two valid values as a drop-down in cmake-gui/ccmake.
set_property(CACHE BUILD_TARGET PROPERTY STRINGS smp-x86 smp-arm64)
# Feature toggles.  ENABLE_MEMDUMP is a plain variable, not an option():
# it is always on and not meant to be user-configurable.
set(ENABLE_MEMDUMP ON)
option(ENABLE_PERF "Enable perf support" ON)
option(ENABLE_RUSAGE "Enable rusage support" ON)
option(ENABLE_MCOVERLAYFS "Enable overlay filesystem" OFF)
option(ENABLE_QLMPI "Enable qlmpi programs" OFF)
option(ENABLE_UTI "Enable uti support" OFF)
option(ENABLE_UBSAN "Enable undefined behaviour sanitizer on mckernel side" OFF)

find_library(LIBRT rt)
find_library(LIBNUMA numa)
find_library(LIBBFD bfd)
find_library(LIBIBERTY iberty)
if (ENABLE_QLMPI)
	find_package(MPI REQUIRED)
endif ()
if (ENABLE_UTI)
	find_library(LIBSYSCALL_INTERCEPT syscall_intercept)
endif ()

# Split the running kernel release string into major;minor;patch;release;rest
# and derive LINUX_VERSION_CODE the way the kernel's KERNEL_VERSION() macro
# does (major<<16 | minor<<8 | patch).  UNAME_R is presumably defined by the
# Kbuild module included above; fail loudly rather than with a cryptic
# string() arity error if it is missing.
if (NOT DEFINED UNAME_R)
	message(FATAL_ERROR "UNAME_R is not set; expected include(Kbuild) to define it")
endif ()
string(REGEX REPLACE "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(-([0-9]+)(.*))?" "\\1;\\2;\\3;\\5;\\6" LINUX_VERSION "${UNAME_R}")
list(GET LINUX_VERSION 0 LINUX_VERSION_MAJOR)
list(GET LINUX_VERSION 1 LINUX_VERSION_MINOR)
list(GET LINUX_VERSION 2 LINUX_VERSION_PATCH)
list(GET LINUX_VERSION 3 LINUX_VERSION_RELEASE)
math(EXPR LINUX_VERSION_CODE "${LINUX_VERSION_MAJOR} * 65536 + ${LINUX_VERSION_MINOR} * 256 + ${LINUX_VERSION_PATCH}")
# Probe the running Linux kernel for symbols the mcctrl driver needs.
# ksym() comes from cmake/modules/Ksym (included above); it appears to look
# each symbol up and record the result under an MCCTRL_-prefixed name —
# TODO(review): confirm against cmake/modules/Ksym.cmake.
ksym(sys_mount PREFIX MCCTRL_)
ksym(sys_umount PREFIX MCCTRL_)
ksym(sys_unshare PREFIX MCCTRL_)
ksym(zap_page_range PREFIX MCCTRL_)
# vdso/vvar symbols (x86 naming); presumably used to map the vDSO into
# McKernel processes.
ksym(vdso_image_64 PREFIX MCCTRL_)
ksym(vdso_start PREFIX MCCTRL_)
ksym(vdso_end PREFIX MCCTRL_)
ksym(vdso_pages PREFIX MCCTRL_)
ksym(__vvar_page PREFIX MCCTRL_)
ksym(hpet_address PREFIX MCCTRL_)
# POSTK_DEBUG_ARCH_DEP_50: additional kernel symbols looked up for that fix.
ksym(vdso_spec PREFIX MCCTRL_)
ksym(hv_clock PREFIX MCCTRL_)
ksym(sys_readlink PREFIX MCCTRL_)
ksym(walk_page_range PREFIX MCCTRL_)
# compat with various install paths
set(MCKERNEL_LIBDIR ${CMAKE_INSTALL_FULL_LIBDIR})
set(BINDIR ${CMAKE_INSTALL_FULL_BINDIR})
set(SBINDIR ${CMAKE_INSTALL_FULL_SBINDIR})
set(ETCDIR ${CMAKE_INSTALL_FULL_SYSCONFDIR})
set(ROOTFSDIR "${CMAKE_INSTALL_PREFIX}/rootfs")

# System-wide installs (/usr prefix) put kernel modules and the McKernel
# image in the usual distro locations; any other prefix keeps everything
# self-contained under that prefix.  Quote the comparison so an empty or
# multi-word prefix cannot break the if() parse.
if ("${CMAKE_INSTALL_PREFIX}" STREQUAL "/usr")
	set(KMODDIR "/lib/modules/${UNAME_R}/extra/mckernel")
	set(MCKERNELDIR "${CMAKE_INSTALL_FULL_DATADIR}/mckernel/${BUILD_TARGET}")
else()
	set(KMODDIR "${CMAKE_INSTALL_PREFIX}/kmod")
	set(MCKERNELDIR "${CMAKE_INSTALL_PREFIX}/${BUILD_TARGET}/kernel")
endif ()

# lower-case `prefix` — presumably substituted as @prefix@ by the *.in
# templates below; verify before renaming.
set(prefix ${CMAKE_INSTALL_PREFIX})
# set rpath for everyone
set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_FULL_LIBDIR})
# ihk: ultimately should support external build, but add as subproject for now.
# Prefer an in-tree ./ihk checkout, fall back to a sibling ../ihk; the cache
# entry lets the user point elsewhere (path relative to the mckernel
# sources).  Cache type is STRING ("STRINGS" is not a valid cache type).
if (NOT EXISTS ${PROJECT_SOURCE_DIR}/ihk/CMakeLists.txt AND EXISTS ${PROJECT_SOURCE_DIR}/../ihk/CMakeLists.txt)
	set(IHK_SOURCE_DIR "../ihk" CACHE STRING "path to ihk source directory from mckernel sources")
else()
	set(IHK_SOURCE_DIR "ihk" CACHE STRING "path to ihk source directory from mckernel sources")
endif()

# Resolve to an absolute path.  The second test prepends "/", which also
# accepts a user-supplied absolute IHK_SOURCE_DIR.
if (EXISTS ${PROJECT_SOURCE_DIR}/${IHK_SOURCE_DIR}/CMakeLists.txt)
	set(IHK_FULL_SOURCE_DIR ${PROJECT_SOURCE_DIR}/${IHK_SOURCE_DIR})
elseif (EXISTS /${IHK_SOURCE_DIR}/CMakeLists.txt)
	set(IHK_FULL_SOURCE_DIR /${IHK_SOURCE_DIR})
else()
	message(FATAL_ERROR "Could not find ihk dir, or it does not contain CMakeLists.txt, either clone ihk or run git submodule update --init")
endif()
configure_file(config.h.in config.h)
# actual build section - just subdirs
add_subdirectory(executer/kernel/mcctrl)
if (ENABLE_MCOVERLAYFS)
add_subdirectory(executer/kernel/mcoverlayfs)
endif()
add_subdirectory(executer/user)
add_subdirectory(kernel)
add_subdirectory(tools/mcstat)
configure_file(arch/x86_64/tools/mcreboot-smp-x86.sh.in mcreboot.sh @ONLY)
configure_file(arch/x86_64/tools/mcstop+release-smp-x86.sh.in mcstop+release.sh @ONLY)
configure_file(arch/x86_64/tools/mcreboot.1in mcreboot.1 @ONLY)
install(PROGRAMS
"${CMAKE_CURRENT_BINARY_DIR}/mcreboot.sh"
"${CMAKE_CURRENT_BINARY_DIR}/mcstop+release.sh"
DESTINATION "${CMAKE_INSTALL_SBINDIR}")
install(FILES
"arch/x86_64/tools/irqbalance_mck.service"
"arch/x86_64/tools/irqbalance_mck.in"
DESTINATION "${CMAKE_INSTALL_SYSCONFDIR}")
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/mcreboot.1"
DESTINATION "${CMAKE_INSTALL_MANDIR}/man1")
configure_file(scripts/mckernel.spec.in scripts/mckernel.spec @ONLY)
set(CPACK_SOURCE_PACKAGE_FILE_NAME "${CMAKE_PROJECT_NAME}-${MCKERNEL_VERSION}")
set(CPACK_SOURCE_IGNORE_FILES "/.git$")
set(CPACK_SOURCE_INSTALLED_DIRECTORIES "${CMAKE_SOURCE_DIR};/;${IHK_FULL_SOURCE_DIR};/ihk;${CMAKE_BINARY_DIR}/scripts;/scripts")
set(CPACK_SOURCE_GENERATOR "TGZ")
include(CPack)
add_custom_target(dist COMMAND ${CMAKE_MAKE_PROGRAM} package_source)
# config report: echo the effective configuration so build logs record it.
set(summary_bar "-------------------------------")
message("${summary_bar}")
message("Option summary")
message("${summary_bar}")
message("Build type: ${CMAKE_BUILD_TYPE}")
message("Build target: ${BUILD_TARGET}")
message("IHK_SOURCE_DIR: ${IHK_SOURCE_DIR} (relative to mckernel source tree)")
# These all print as "NAME: value", so emit them from one list.
foreach(summary_var
		UNAME_R
		KERNEL_DIR
		SYSTEM_MAP
		VMLINUX
		KBUILD_C_FLAGS
		ENABLE_MEMDUMP
		ENABLE_PERF
		ENABLE_RUSAGE
		ENABLE_MCOVERLAYFS
		ENABLE_QLMPI
		ENABLE_UTI
		ENABLE_WERROR
		ENABLE_UBSAN)
	message("${summary_var}: ${${summary_var}}")
endforeach()
message("${summary_bar}")

339
LICENSE
View File

@ -1,339 +0,0 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

81
Makefile.in Executable file
View File

@ -0,0 +1,81 @@
TARGET = @TARGET@
SBINDIR = @SBINDIR@
INCDIR = @INCDIR@
ETCDIR = @ETCDIR@
MANDIR = @MANDIR@

# None of these rules produce a file named after the target.
.PHONY: all executer-mcctrl executer-mcoverlayfs executer-user mckernel mck-tools

all: executer-mcctrl executer-mcoverlayfs executer-user mckernel mck-tools

executer-mcctrl:
	@(cd executer/kernel/mcctrl; $(MAKE) modules)

executer-mcoverlayfs:
	@(cd executer/kernel/mcoverlayfs; $(MAKE) modules)

executer-user:
	@(cd executer/user; $(MAKE))

mckernel:
# The ";" after ">&2" is required: with the trailing "\" continuations,
# "exit 1" would otherwise become extra arguments to echo and the build
# would not abort on an unknown target.
	@case "$(TARGET)" in \
	attached-mic | builtin-x86 | builtin-mic | smp-x86 | smp-arm64) \
		(cd kernel; $(MAKE)) \
		;; \
	*) \
		echo "unknown target $(TARGET)" >&2; \
		exit 1 \
		;; \
	esac

mck-tools:
	@(cd tools/mcstat; $(MAKE))
.PHONY: install

install:
	@(cd executer/kernel/mcctrl; $(MAKE) install)
	@(cd executer/kernel/mcoverlayfs; $(MAKE) install)
	@(cd executer/user; $(MAKE) install)
# ";" after ">&2" is required so that "exit 1" runs as its own command
# instead of being swallowed as arguments to echo.
	@case "$(TARGET)" in \
	attached-mic | builtin-x86 | builtin-mic | smp-x86 | smp-arm64) \
		(cd kernel; $(MAKE) install) \
		;; \
	*) \
		echo "unknown target $(TARGET)" >&2; \
		exit 1 \
		;; \
	esac
# SMP targets additionally install boot/stop scripts, the irqbalance
# configuration, the swap header and the mcreboot man page.
	@case "$(TARGET)" in \
	smp-x86 | smp-arm64) \
		mkdir -p -m 755 $(SBINDIR); \
		install -m 755 arch/x86_64/tools/mcreboot-smp-x86.sh $(SBINDIR)/mcreboot.sh; \
		install -m 755 arch/x86_64/tools/mcstop+release-smp-x86.sh $(SBINDIR)/mcstop+release.sh; \
		install -m 755 arch/x86_64/tools/mcoverlay-destroy-smp-x86.sh $(SBINDIR)/mcoverlay-destroy.sh; \
		install -m 755 arch/x86_64/tools/mcoverlay-create-smp-x86.sh $(SBINDIR)/mcoverlay-create.sh; \
		install -m 755 arch/x86_64/tools/eclair-dump-backtrace.exp $(SBINDIR)/eclair-dump-backtrace.exp;\
		mkdir -p -m 755 $(ETCDIR); \
		install -m 644 arch/x86_64/tools/irqbalance_mck.service $(ETCDIR)/irqbalance_mck.service; \
		install -m 644 arch/x86_64/tools/irqbalance_mck.in $(ETCDIR)/irqbalance_mck.in; \
		mkdir -p -m 755 $(INCDIR); \
		install -m 644 kernel/include/swapfmt.h $(INCDIR); \
		mkdir -p -m 755 $(MANDIR)/man1; \
		install -m 644 arch/x86_64/tools/mcreboot.1 $(MANDIR)/man1/mcreboot.1; \
		;; \
	*) \
		echo "unknown target $(TARGET)" >&2; \
		exit 1 \
		;; \
	esac
	@(cd tools/mcstat/; $(MAKE) install)
.PHONY: clean

clean:
	@(cd executer/kernel/mcctrl; $(MAKE) clean)
	@(cd executer/kernel/mcoverlayfs; $(MAKE) clean)
	@(cd executer/user; $(MAKE) clean)
# ";" after ">&2" is required so that "exit 1" runs as its own command
# instead of being swallowed as arguments to echo.
	@case "$(TARGET)" in \
	attached-mic | builtin-x86 | builtin-mic | smp-x86 | smp-arm64) \
		(cd kernel; $(MAKE) clean) \
		;; \
	*) \
		echo "unknown target $(TARGET)" >&2; \
		exit 1 \
		;; \
	esac
	@(cd tools/mcstat; $(MAKE) clean)

186
README.md
View File

@ -1,186 +0,0 @@
![McKernel Logo](https://www.sys.r-ccs.riken.jp/members_files/bgerofi/mckernel-logo.png)
-------------------------
IHK/McKernel is a light-weight multi-kernel operating system designed for high-end supercomputing. It runs Linux and McKernel, a light-weight kernel (LWK), side-by-side inside compute nodes and aims at the following:
- Provide scalable and consistent execution of large-scale parallel scientific applications, but at the same time maintain the ability to rapidly adapt to new hardware features and emerging programming models
- Provide efficient memory and device management so that resource contention and data movement are minimized at the system level
- Eliminate OS noise by isolating OS services in Linux and provide jitter free execution on the LWK
- Support the full POSIX/Linux APIs by selectively offloading (slow-path) system calls to Linux
## Contents
- [Background and Motivation](#background-and-motivation)
- [Architectural Overview](#architectural-overview)
- [Installation](#installation)
- [The Team](#the-team)
## Background and Motivation
With the growing complexity of high-end supercomputers, the current system software stack faces significant challenges as we move forward to exascale and beyond. The necessity to deal with extreme degree of parallelism, heterogeneous architectures, multiple levels of memory hierarchy, power constraints, etc., advocates operating systems that can rapidly adapt to new hardware requirements, and that can support novel programming paradigms and runtime systems. On the other hand, a new class of more dynamic and complex applications are also on the horizon, with an increasing demand for application constructs such as in-situ analysis, workflows, elaborate monitoring and performance tools. This complexity relies not only on the rich features of POSIX, but also on the Linux APIs (such as the */proc*, */sys* filesystems, etc.) in particular.
##### Two Traditional HPC OS Approaches
Traditionally, light-weight operating systems specialized for HPC followed two approaches to tackle scalable execution of large-scale applications. In the full weight kernel (FWK) approach, a full Linux environment is taken as the basis, and features that inhibit attaining HPC scalability are removed, i.e., making it light-weight. The pure light-weight kernel (LWK) approach, on the other hand, starts from scratch and effort is undertaken to add sufficient functionality so that it provides a familiar API, typically something close to that of a general purpose OS, while at the same time it retains the desired scalability and reliability attributes. Neither of these approaches yields a fully Linux compatible environment.
##### The Multi-kernel Approach
A hybrid approach recognized recently by the system software community is to run Linux simultaneously with a lightweight kernel on compute nodes and multiple research projects are now pursuing this direction. The basic idea is that simulations run on an HPC tailored lightweight kernel, ensuring the necessary isolation for noiseless execution of parallel applications, but Linux is leveraged so that the full POSIX API is supported. Additionally, the small code base of the LWK can also facilitate rapid prototyping for new, exotic hardware features. Nevertheless, the questions of how to share node resources between the two types of kernels, where do device drivers execute, how exactly do the two kernels interact with each other and to what extent are they integrated, remain subjects of ongoing debate.
## Architectural Overview
At the heart of the stack is a low-level software infrastructure called Interface for Heterogeneous Kernels (IHK). IHK is a general framework that provides capabilities for partitioning resources in a many-core environment (e.g.,CPU cores and physical memory) and it enables management of lightweight kernels. IHK can allocate and release host resources dynamically and no reboot of the host machine is required when altering configuration. IHK also provides a low-level inter-kernel messaging infrastructure, called the Inter-Kernel Communication (IKC) layer. An architectural overview of the main system components is shown below.
![arch](https://www.sys.r-ccs.riken.jp/members_files/bgerofi/mckernel.png)
McKernel is a lightweight kernel written from scratch. It is designed for HPC and is booted from IHK. McKernel retains a binary compatible ABI with Linux, however, it implements only a small set of performance sensitive system calls and the rest are offloaded to Linux. Specifically, McKernel has its own memory management, it supports processes and multi-threading with a simple round-robin cooperative (tick-less) scheduler, and it implements signaling. It also allows inter-process memory mappings and it provides interfaces to hardware performance counters.
### Functionality
An overview of some of the principal functionalities of the IHK/McKernel stack is provided below.
#### System Call Offloading
System call forwarding in McKernel is implemented as follows. When an offloaded system call occurs, McKernel marshals the system call number along with its arguments and sends a message to Linux via a dedicated IKC channel. The corresponding proxy process running on Linux is by default waiting for system call requests through an ioctl() call into IHK's system call delegator kernel module. The delegator kernel module's IKC interrupt handler wakes up the proxy process, which returns to userspace and simply invokes the requested system call. Once it obtains the return value, it instructs the delegator module to send the result back to McKernel, which subsequently passes the value to user-space.
#### Unified Address Space
The unified address space model in IHK/McKernel ensures that offloaded system calls can seamlessly resolve arguments even in case of pointers. This mechanism is depicted below and is implemented as follows.
![unified_ap](https://www.sys.r-ccs.riken.jp/members_files/bgerofi/img/unified_address_space_en.png)
First, the proxy process is compiled as a position independent binary, which enables us to map the code and data segments specific to the proxy process to an address range which is explicitly excluded from McKernel's user space. The grey box on the right side of the figure demonstrates the excluded region. Second, the entire valid virtual address range of McKernel's application user-space is covered by a special mapping in the proxy process for which we use a pseudo file mapping in Linux. This mapping is indicated by the blue box on the left side of the figure.
## Installation
For a smooth experience, we recommend the following combination of OS distributions and platforms:
- CentOS 7.3+ running on Intel Xeon / Xeon Phi
##### 1. Change SELinux settings
Log in as the root and disable SELinux:
~~~~
vim /etc/selinux/config
~~~~
Change the file to SELINUX=disabled
##### 2. Reboot the host machine
~~~~
sudo reboot
~~~~
##### 3. Prepare packages, kernel symbol table file
You will need the following packages installed:
~~~~
sudo yum install kernel-devel binutils-devel libnuma-devel
~~~~
Grant read permission to the System.map file of your kernel version:
~~~~
sudo chmod a+r /boot/System.map-`uname -r`
~~~~
##### 4. Obtain sources and compile the kernel
Clone the source code and set up ihk symlink (this is currently required):
~~~~
mkdir -p ~/src/ihk+mckernel/
cd ~/src/ihk+mckernel/
git clone --recursive git@github.com:RIKEN-SysSoft/mckernel.git
~~~~
Configure and compile:
~~~~
mkdir -p build && cd build
cmake -DCMAKE_INSTALL_PREFIX=${HOME}/ihk+mckernel $HOME/src/ihk+mckernel/mckernel
make -j install
~~~~
The IHK kernel modules and McKernel kernel image should be installed under the **ihk+mckernel** folder in your home directory.
##### 5. Boot McKernel
A boot script called mcreboot.sh is provided under sbin in the install folder. To boot on logical CPU 1 with 512MB of memory, use the following invocation:
~~~~
export TOP=${HOME}/ihk+mckernel/
cd ${TOP}
sudo ./sbin/mcreboot.sh -c 1 -m 512m
~~~~
You should see something similar like this if you display the McKernel's kernel message log:
~~~~
./sbin/ihkosctl 0 kmsg
IHK/McKernel started.
[ -1]: no_execute_available: 1
[ -1]: map_fixed: phys: 0xfee00000 => 0xffff860000009000 (1 pages)
[ -1]: setup_x86 done.
[ -1]: ns_per_tsc: 385
[ -1]: KCommand Line: hidos dump_level=24
[ -1]: Physical memory: 0x1ad3000 - 0x21000000, 525520896 bytes, 128301 pages available @ NUMA: 0
[ -1]: NUMA: 0, Linux NUMA: 0, type: 1, available bytes: 525520896, pages: 128301
[ -1]: NUMA 0 distances: 0 (10),
[ -1]: map_fixed: phys: 0x28000 => 0xffff86000000a000 (2 pages)
[ -1]: Trampoline area: 0x28000
[ -1]: map_fixed: phys: 0x0 => 0xffff86000000c000 (1 pages)
[ -1]: # of cpus : 1
[ -1]: locals = ffff880001af6000
[ 0]: BSP: 0 (HW ID: 1 @ NUMA 0)
[ 0]: BSP: booted 0 AP CPUs
[ 0]: Master channel init acked.
[ 0]: vdso is enabled
IHK/McKernel booted.
~~~~
##### 6. Run a simple program on McKernel
The mcexec command line tool (which is also the Linux proxy process) can be used for executing applications on McKernel:
~~~~
./bin/mcexec hostname
centos-vm
~~~~
##### 7. Shutdown McKernel
Finally, to shutdown McKernel and release CPU/memory resources back to Linux use the following command:
~~~~
sudo ./sbin/mcstop+release.sh
~~~~
## The Team
The McKernel project was started at The University of Tokyo and currently it is mainly developed at RIKEN.
Some of our collaborators include:
- Hitachi
- Fujitsu
- CEA (France)
- NEC
## License
McKernel is GPL licensed, as found in the LICENSE file.

View File

@ -1,4 +1,4 @@
# Makefile.arch.in COPYRIGHT FUJITSU LIMITED 2015-2018
# Makefile.arch COPYRIGHT FUJITSU LIMITED 2015-2017
VDSO_SRCDIR = $(SRC)/../arch/$(IHKARCH)/kernel/vdso
VDSO_BUILDDIR = @abs_builddir@/vdso
VDSO_SO_O = $(O)/vdso.so.o
@ -6,22 +6,23 @@ VDSO_SO_O = $(O)/vdso.so.o
IHK_OBJS += assert.o cache.o cpu.o cputable.o context.o entry.o entry-fpsimd.o
IHK_OBJS += fault.o head.o hyp-stub.o local.o perfctr.o perfctr_armv8pmu.o proc.o proc-macros.o
IHK_OBJS += psci.o smp.o trampoline.o traps.o fpsimd.o
IHK_OBJS += debug-monitors.o hw_breakpoint.o ptrace.o timer.o
IHK_OBJS += debug-monitors.o hw_breakpoint.o ptrace.o
IHK_OBJS += $(notdir $(VDSO_SO_O)) memory.o syscall.o vdso.o
IHK_OBJS += irq-gic-v2.o irq-gic-v3.o
IHK_OBJS += memcpy.o memset.o
IHK_OBJS += cpufeature.o
IHK_OBJS += imp-sysreg.o
# POSTK_DEBUG_ARCH_DEP_18 coredump arch separation.
# IHK_OBJS added coredump.o
IHK_OBJS += coredump.o
$(VDSO_SO_O): $(VDSO_BUILDDIR)/vdso.so
$(VDSO_BUILDDIR)/vdso.so: FORCE
$(call echo_cmd,BUILD VDSO,$(TARGET))
mkdir -p $(O)/vdso
TARGETDIR="$(TARGETDIR)" $(submake) -C $(VDSO_BUILDDIR) $(SUBOPTS) prepare
TARGETDIR="$(TARGETDIR)" $(submake) -C $(VDSO_BUILDDIR) $(SUBOPTS)
@mkdir -p $(O)/vdso
@TARGETDIR="$(TARGETDIR)" $(submake) -C $(VDSO_BUILDDIR) $(SUBOPTS) prepare
@TARGETDIR="$(TARGETDIR)" $(submake) -C $(VDSO_BUILDDIR) $(SUBOPTS)
FORCE:

View File

@ -1,4 +1,4 @@
/* assert.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* assert.c COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <process.h>
#include <list.h>
@ -24,7 +24,6 @@ STATIC_ASSERT(offsetof(struct pt_regs, sp) == S_SP);
STATIC_ASSERT(offsetof(struct pt_regs, pc) == S_PC);
STATIC_ASSERT(offsetof(struct pt_regs, pstate) == S_PSTATE);
STATIC_ASSERT(offsetof(struct pt_regs, orig_x0) == S_ORIG_X0);
STATIC_ASSERT(offsetof(struct pt_regs, orig_pc) == S_ORIG_PC);
STATIC_ASSERT(offsetof(struct pt_regs, syscallno) == S_SYSCALLNO);
STATIC_ASSERT(sizeof(struct pt_regs) == S_FRAME_SIZE);
@ -51,6 +50,3 @@ STATIC_ASSERT(sizeof(struct sigcontext) - offsetof(struct sigcontext, __reserved
ALIGN_UP(sizeof(struct _aarch64_ctx), 16) > sizeof(struct extra_context));
STATIC_ASSERT(SVE_PT_FPSIMD_OFFSET == sizeof(struct user_sve_header));
STATIC_ASSERT(SVE_PT_SVE_OFFSET == sizeof(struct user_sve_header));
/* assert for struct arm64_cpu_local_thread member offset define */
STATIC_ASSERT(offsetof(struct arm64_cpu_local_thread, panic_regs) == 160);

View File

@ -1,4 +1,5 @@
/* coredump.c COPYRIGHT FUJITSU LIMITED 2015-2016 */
#ifdef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
#include <process.h>
#include <elfcore.h>
#include <string.h>
@ -30,3 +31,5 @@ void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread,
/* copy unaligned prstatus addr */
memcpy(prstatus, &tmp_prstatus, sizeof(*prstatus));
}
#endif /* POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -1,4 +1,4 @@
/* cpu.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* cpu.c COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <ihk/cpu.h>
#include <ihk/debug.h>
#include <ihk/mm.h>
@ -31,8 +31,9 @@
#include <sysreg.h>
#include <cpufeature.h>
#include <debug.h>
#ifdef POSTK_DEBUG_ARCH_DEP_65
#include <hwcap.h>
#include <virt.h>
#endif /* POSTK_DEBUG_ARCH_DEP_65 */
//#define DEBUG_PRINT_CPU
@ -44,6 +45,7 @@
#endif
struct cpuinfo_arm64 cpuinfo_data[NR_CPUS]; /* index is logical cpuid */
static unsigned int per_cpu_timer_val[NR_CPUS] = { 0 };
static struct list_head handlers[1024];
static void cpu_init_interrupt_handler(void);
@ -53,6 +55,7 @@ void assign_processor_id(void);
void arch_delay(int);
int gettime_local_support = 0;
extern int ihk_mc_pt_print_pte(struct page_table *pt, void *virt);
extern int interrupt_from_user(void *);
extern unsigned long ihk_param_gic_dist_base_pa;
@ -113,54 +116,137 @@ static struct ihk_mc_interrupt_handler cpu_stop_handler = {
.priv = NULL,
};
extern long freeze_thaw(void *nmi_ctx);
static void multi_nm_interrupt_handler(void *priv)
/* @ref.impl include/clocksource/arm_arch_timer.h */
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
static void physical_timer_handler(void *priv)
{
extern int nmi_mode;
struct pt_regs *regs = (struct pt_regs *)priv;
union arm64_cpu_local_variables *clv;
unsigned int ctrl = 0;
int cpu = ihk_mc_get_processor_id();
switch (nmi_mode) {
case 1:
case 2:
/* mode == 1or2, for FREEZER NMI */
dkprintf("%s: freeze mode NMI catch. (nmi_mode=%d)\n",
__func__, nmi_mode);
freeze_thaw(NULL);
break;
dkprintf("CPU%d: catch physical timer\n", cpu);
case 0:
/* mode == 0, for MEMDUMP NMI */
clv = get_arm64_this_cpu_local();
asm volatile("mrs %0, cntp_ctl_el0" : "=r" (ctrl));
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
unsigned int zero = 0;
unsigned int val = ctrl;
unsigned int clocks = per_cpu_timer_val[cpu];
unsigned long irqstate;
struct cpu_local_var *v = get_this_cpu_local_var();
if (regs) {
memcpy(clv->arm64_cpu_local_thread.panic_regs,
regs->regs, sizeof(regs->regs));
clv->arm64_cpu_local_thread.panic_regs[31] = regs->sp;
clv->arm64_cpu_local_thread.panic_regs[32] = regs->pc;
clv->arm64_cpu_local_thread.panic_regs[33] =
regs->pstate;
}
clv->arm64_cpu_local_thread.paniced = 1;
ihk_mc_query_mem_areas();
/* memdump-nmi is halted McKernel, break is unnecessary. */
/* fall through */
case 3:
/* mode == 3, for SHUTDOWN-WAIT NMI */
while (1) {
cpu_halt();
}
break;
/* set resched flag */
irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
v->flags |= CPU_FLAG_NEED_RESCHED;
ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
default:
ekprintf("%s: Unknown nmi-mode(%d) detected.\n",
__func__, nmi_mode);
break;
/* gen control register value */
val &= ~(ARCH_TIMER_CTRL_IT_STAT | ARCH_TIMER_CTRL_IT_MASK);
val |= ARCH_TIMER_CTRL_ENABLE;
/* set timer re-enable for periodic */
asm volatile("msr cntp_ctl_el0, %0" : : "r" (zero));
asm volatile("msr cntp_tval_el0, %0" : : "r" (clocks));
asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
}
}
static struct ihk_mc_interrupt_handler multi_nmi_handler = {
.func = multi_nm_interrupt_handler,
static struct ihk_mc_interrupt_handler phys_timer_handler = {
.func = physical_timer_handler,
.priv = NULL,
};
/*
 * Interrupt handler for the AArch64 virtual generic timer (CNTV).
 * On a pending timer interrupt it requests a reschedule on this CPU and
 * re-arms the timer with the saved per-CPU period, making it periodic.
 */
static void virtual_timer_handler(void *priv)
{
unsigned int ctrl = 0;
int cpu = ihk_mc_get_processor_id();
dkprintf("CPU%d: catch virtual timer\n", cpu);
/* Read the virtual timer control register to check interrupt status. */
asm volatile("mrs %0, cntv_ctl_el0" : "=r" (ctrl));
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
unsigned int zero = 0;
unsigned int val = ctrl;
/* Reload value saved by lapic_timer_enable() for this CPU. */
unsigned int clocks = per_cpu_timer_val[cpu];
unsigned long irqstate;
struct cpu_local_var *v = get_this_cpu_local_var();
/* Mark this CPU as needing a reschedule (under the runq lock). */
irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
v->flags |= CPU_FLAG_NEED_RESCHED;
ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
/* Build control value: clear status/mask bits, keep timer enabled. */
val &= ~(ARCH_TIMER_CTRL_IT_STAT | ARCH_TIMER_CTRL_IT_MASK);
val |= ARCH_TIMER_CTRL_ENABLE;
/* Re-arm: disable, reload the countdown, then re-enable. */
asm volatile("msr cntv_ctl_el0, %0" : : "r" (zero));
asm volatile("msr cntv_tval_el0, %0" : : "r" (clocks));
asm volatile("msr cntv_ctl_el0, %0" : : "r" (val));
}
}
/* Registration record for the virtual-timer interrupt (see ihk_mc_init_ap). */
static struct ihk_mc_interrupt_handler virt_timer_handler = {
.func = virtual_timer_handler,
.priv = NULL,
};
/*
 * Interrupt handler used for memory-dump requests: captures this CPU's
 * register state into the per-CPU panic_regs area, marks the CPU as
 * paniced, and halts forever so the dump can be taken.
 */
static void memdump_interrupt_handler(void *priv)
{
struct pt_regs *regs;
union arm64_cpu_local_variables *clv;
regs = cpu_local_var(current)->uctx;
clv = get_arm64_this_cpu_local();
if (regs && interrupt_from_user(regs)) {
/* Interrupted from user mode: copy the saved user context.
 * Layout: x0..x30, then [31]=sp, [32]=pc, [33]=pstate. */
memcpy(clv->arm64_cpu_local_thread.panic_regs, regs->regs, sizeof(regs->regs));
clv->arm64_cpu_local_thread.panic_regs[31] = regs->sp;
clv->arm64_cpu_local_thread.panic_regs[32] = regs->pc;
clv->arm64_cpu_local_thread.panic_regs[33] = regs->pstate;
}
else {
/* Interrupted from kernel mode (or no saved context): snapshot the
 * live registers directly. x0..x30 are stored pairwise; sp, pc
 * (address of label 1:) and spsr_el1 fill slots 31..33. */
asm volatile (
"stp x0, x1, [%3, #16 * 0]\n"
"stp x2, x3, [%3, #16 * 1]\n"
"stp x4, x5, [%3, #16 * 2]\n"
"stp x6, x7, [%3, #16 * 3]\n"
"stp x8, x9, [%3, #16 * 4]\n"
"stp x10, x11, [%3, #16 * 5]\n"
"stp x12, x13, [%3, #16 * 6]\n"
"stp x14, x15, [%3, #16 * 7]\n"
"stp x16, x17, [%3, #16 * 8]\n"
"stp x18, x19, [%3, #16 * 9]\n"
"stp x20, x21, [%3, #16 * 10]\n"
"stp x22, x23, [%3, #16 * 11]\n"
"stp x24, x25, [%3, #16 * 12]\n"
"stp x26, x27, [%3, #16 * 13]\n"
"stp x28, x29, [%3, #16 * 14]\n"
"str x30, [%3, #16 * 15]\n"
"mov %0, sp\n"
"adr %1, 1f\n"
"mrs %2, spsr_el1\n"
"1:"
: "=r" (clv->arm64_cpu_local_thread.panic_regs[31]), /* sp */
"=r" (clv->arm64_cpu_local_thread.panic_regs[32]), /* pc */
"=r" (clv->arm64_cpu_local_thread.panic_regs[33]) /* spsr_el1 */
: "r" (&clv->arm64_cpu_local_thread.panic_regs)
: "memory"
);
}
clv->arm64_cpu_local_thread.paniced = 1;
/* Never return: park the CPU so the memory image stays stable. */
while(1)
{
cpu_halt();
}
}
/* Registration record for the memdump interrupt (INTRID_MEMDUMP). */
static struct ihk_mc_interrupt_handler memdump_handler = {
.func = memdump_interrupt_handler,
.priv = NULL,
};
@ -247,160 +333,19 @@ static void setup_processor(void)
static char *trampoline_va, *first_page_va;
static inline uint64_t pwr_arm64hpc_read_imp_fj_core_uarch_restrection_el1(void)
unsigned long is_use_virt_timer(void)
{
uint64_t reg;
extern unsigned long ihk_param_use_virt_timer;
asm volatile("mrs_s %0, " __stringify(IMP_FJ_CORE_UARCH_RESTRECTION_EL1)
: "=r" (reg) : : "memory");
return reg;
}
/* Serializes read-modify-write of IMP_FJ_CORE_UARCH_RESTRECTION_EL1. */
static ihk_spinlock_t imp_fj_core_uarch_restrection_el1_lock =
SPIN_LOCK_UNLOCKED;
/*
 * Atomically update IMP_FJ_CORE_UARCH_RESTRECTION_EL1 under the lock:
 * bits in clear_bit are cleared first, then bits in set_bit are set.
 */
static inline void pwr_arm64hpc_write_imp_fj_core_uarch_restrection_el1(uint64_t set_bit,
uint64_t clear_bit)
{
uint64_t reg;
unsigned long flags;
flags = ihk_mc_spinlock_lock(&imp_fj_core_uarch_restrection_el1_lock);
reg = pwr_arm64hpc_read_imp_fj_core_uarch_restrection_el1();
reg = (reg & ~clear_bit) | set_bit;
asm volatile("msr_s " __stringify(IMP_FJ_CORE_UARCH_RESTRECTION_EL1) ", %0"
: : "r" (reg) : "memory");
ihk_mc_spinlock_unlock(&imp_fj_core_uarch_restrection_el1_lock, flags);
}
/* Read the implementation-defined IMP_SOC_STANDBY_CTRL_EL1 register. */
static inline uint64_t pwr_arm64hpc_read_imp_soc_standby_ctrl_el1(void)
{
uint64_t reg;
asm volatile("mrs_s %0, " __stringify(IMP_SOC_STANDBY_CTRL_EL1)
: "=r" (reg) : : "memory");
return reg;
}
/* Serializes read-modify-write of IMP_SOC_STANDBY_CTRL_EL1. */
static ihk_spinlock_t imp_soc_standby_ctrl_el1_lock = SPIN_LOCK_UNLOCKED;
/*
 * Atomically update IMP_SOC_STANDBY_CTRL_EL1 under the lock:
 * bits in clear_bit are cleared first, then bits in set_bit are set.
 */
static inline void pwr_arm64hpc_write_imp_soc_standby_ctrl_el1(uint64_t set_bit,
uint64_t clear_bit)
{
unsigned long flags;
uint64_t reg;
flags = ihk_mc_spinlock_lock(&imp_soc_standby_ctrl_el1_lock);
reg = pwr_arm64hpc_read_imp_soc_standby_ctrl_el1();
reg = (reg & ~clear_bit) | set_bit;
asm volatile("msr_s " __stringify(IMP_SOC_STANDBY_CTRL_EL1) ", %0"
: : "r" (reg) : "memory");
ihk_mc_spinlock_unlock(&imp_soc_standby_ctrl_el1_lock, flags);
}
/* Per-core retention-state flag bitmap; mapped from the physical address
 * supplied by Linux at boot, or left NULL when none was provided. */
static unsigned long *retention_state_flag;

/* The HPC power-management facility is usable exactly when the
 * retention-state flag bitmap has been mapped. Returns 1 or 0. */
static inline int is_hpcpwr_available(void)
{
	return retention_state_flag ? 1 : 0;
}
static inline void pwr_arm64hpc_map_retention_state_flag(void)
{
extern unsigned long ihk_param_retention_state_flag_pa;
unsigned long size = BITS_TO_LONGS(NR_CPUS) * sizeof(unsigned long);
if (!ihk_param_retention_state_flag_pa) {
return;
switch (ihk_param_use_virt_timer) {
case 0: /* physical */
case 1: /* virtual */
break;
default: /* invalid */
panic("PANIC: is_use_virt_timer(): timer select neither phys-timer nor virt-timer.\n");
break;
}
retention_state_flag = map_fixed_area(ihk_param_retention_state_flag_pa,
size, 0);
}
/*
 * Read this CPU's retention-state flag from the shared bitmap.
 * The bitmap is indexed by the Linux core id obtained via ihk_mc_get_core().
 * When power management is unavailable, reports 0 and succeeds.
 * Returns 0 on success or the ihk_mc_get_core() error code.
 */
static inline int pwr_arm64hpc_retention_state_get(uint64_t *val)
{
unsigned long linux_core_id;
int cpu = ihk_mc_get_processor_id();
int ret;
if (!is_hpcpwr_available()) {
*val = 0;
return 0;
}
ret = ihk_mc_get_core(cpu, &linux_core_id, NULL, NULL);
if (ret) {
return ret;
}
*val = test_bit(linux_core_id, retention_state_flag);
return ret;
}
/*
 * Set (val != 0) or clear (val == 0) this CPU's retention-state flag in
 * the shared bitmap. No-op success when power management is unavailable.
 * Returns 0 on success or the ihk_mc_get_core() error code.
 *
 * Fix: the bitmap is indexed by the Linux core id, matching
 * pwr_arm64hpc_retention_state_get(); the previous code indexed with the
 * McKernel logical id "cpu" (and left linux_core_id computed but unused),
 * so set and get could touch different bits.
 */
static inline int pwr_arm64hpc_retention_state_set(uint64_t val)
{
	unsigned long linux_core_id;
	int cpu = ihk_mc_get_processor_id();
	int ret;

	if (!is_hpcpwr_available()) {
		return 0;
	}
	ret = ihk_mc_get_core(cpu, &linux_core_id, NULL, NULL);
	if (ret) {
		return ret;
	}
	if (val) {
		set_bit(linux_core_id, retention_state_flag);
	} else {
		clear_bit(linux_core_id, retention_state_flag);
	}
	return ret;
}
/*
 * Enable the CPU retention low-power state by setting the RETENTION bit
 * in IMP_SOC_STANDBY_CTRL_EL1. No-op success when power management is
 * unavailable. Always returns 0.
 */
static inline int pwr_arm64hpc_enable_retention_state(void)
{
if (!is_hpcpwr_available()) {
return 0;
}
pwr_arm64hpc_write_imp_soc_standby_ctrl_el1(IMP_SOC_STANDBY_CTRL_EL1_RETENTION,
0);
return 0;
}
static inline void init_power_management(void)
{
int state;
uint64_t imp_fj_clear_bit = 0;
uint64_t imp_soc_clear_bit = 0;
if (!is_hpcpwr_available()) {
return;
}
/* retention state */
state = pwr_arm64hpc_retention_state_set(0);
if (state) {
panic("error: initialize power management\n");
}
/* issue state */
imp_fj_clear_bit |= IMP_FJ_CORE_UARCH_RESTRECTION_EL1_ISSUE_RESTRICTION;
/* eco_state */
imp_fj_clear_bit |= IMP_FJ_CORE_UARCH_RESTRECTION_EL1_FL_RESTRICT_TRANS;
imp_soc_clear_bit |= IMP_SOC_STANDBY_CTRL_EL1_ECO_MODE;
/* ex_pipe_state */
imp_fj_clear_bit |= IMP_FJ_CORE_UARCH_RESTRECTION_EL1_EX_RESTRICTION;
/* write */
pwr_arm64hpc_write_imp_fj_core_uarch_restrection_el1(0,
imp_fj_clear_bit);
pwr_arm64hpc_write_imp_soc_standby_ctrl_el1(0, imp_soc_clear_bit);
return ihk_param_use_virt_timer;
}
/*@
@ -418,19 +363,23 @@ void ihk_mc_init_ap(void)
kprintf("# of cpus : %d\n", cpu_info->ncpus);
init_processors_local(cpu_info->ncpus);
kprintf("IKC IRQ vector: %d, IKC target CPU APIC: %d\n",
ihk_ikc_irq, ihk_ikc_irq_apicid);
/* Do initialization for THIS cpu (BSP) */
assign_processor_id();
ihk_mc_register_interrupt_handler(INTRID_CPU_STOP, &cpu_stop_handler);
ihk_mc_register_interrupt_handler(INTRID_MULTI_NMI, &multi_nmi_handler);
ihk_mc_register_interrupt_handler(INTRID_MEMDUMP, &memdump_handler);
ihk_mc_register_interrupt_handler(
ihk_mc_get_vector(IHK_TLB_FLUSH_IRQ_VECTOR_START),
&remote_tlb_flush_handler);
ihk_mc_register_interrupt_handler(get_timer_intrid(),
get_timer_handler());
ihk_mc_get_vector(IHK_TLB_FLUSH_IRQ_VECTOR_START), &remote_tlb_flush_handler);
if (is_use_virt_timer()) {
ihk_mc_register_interrupt_handler(get_virt_timer_intrid(), &virt_timer_handler);
} else {
ihk_mc_register_interrupt_handler(get_phys_timer_intrid(), &phys_timer_handler);
}
init_smp_processor();
init_power_management();
}
extern void vdso_init(void);
@ -439,13 +388,15 @@ long (*__arm64_syscall_handler)(int, ihk_mc_user_context_t *);
/* @ref.impl arch/arm64/include/asm/arch_timer.h::arch_timer_get_cntkctl */
static inline unsigned int arch_timer_get_cntkctl(void)
{
return read_sysreg(cntkctl_el1);
unsigned int cntkctl;
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
return cntkctl;
}
/* @ref.impl arch/arm64/include/asm/arch_timer.h::arch_timer_set_cntkctl */
static inline void arch_timer_set_cntkctl(unsigned int cntkctl)
{
write_sysreg(cntkctl, cntkctl_el1);
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
#ifdef CONFIG_ARM_ARCH_TIMER_EVTSTREAM
@ -511,19 +462,19 @@ void init_cpu(void)
{
if(gic_enable)
gic_enable();
arm64_init_per_cpu_perfctr();
arm64_enable_pmu();
if (xos_is_tchip()) {
vhbm_barrier_registers_init();
scdrv_registers_init();
hpc_registers_init();
}
arm64_enable_user_access_pmu_regs();
}
#ifdef CONFIG_ARM64_VHE
/* @ref.impl arch/arm64/include/asm/virt.h */
/* Returns nonzero when the kernel is currently executing at EL2 (HYP),
 * i.e. under VHE; determined by reading the CurrentEL system register. */
static inline int is_kernel_in_hyp_mode(void)
{
unsigned long el;
asm("mrs %0, CurrentEL" : "=r" (el));
return el == CurrentEL_EL2;
}
/* @ref.impl arch/arm64/kernel/smp.c */
/* Whether the boot CPU is running in HYP mode or not */
static int boot_cpu_hyp_mode;
@ -568,12 +519,8 @@ void setup_arm64(void)
arm64_init_perfctr();
arch_timer_init();
gic_init();
pwr_arm64hpc_map_retention_state_flag();
init_cpu();
init_gettime_support();
@ -616,7 +563,6 @@ void setup_arm64_ap(void (*next_func)(void))
debug_monitors_init();
arch_timer_configure_evtstream();
init_cpu();
init_power_management();
call_ap_func(next_func);
/* BUG */
@ -646,7 +592,6 @@ static void show_context_stack(struct pt_regs *regs)
max_loop = (stack_top - sp) / min_stack_frame_size;
for (i = 0; i < max_loop; i++) {
extern char _head[], _end[];
uintptr_t *fp, *lr;
fp = (uintptr_t *)sp;
lr = (uintptr_t *)(sp + 8);
@ -655,8 +600,7 @@ static void show_context_stack(struct pt_regs *regs)
break;
}
if ((*lr < (unsigned long)_head) ||
(*lr > (unsigned long)_end)) {
if ((*lr < MAP_KERNEL_START) || (*lr > MAP_KERNEL_START + MAP_KERNEL_SIZE)) {
break;
}
@ -681,7 +625,7 @@ void handle_IPI(unsigned int vector, struct pt_regs *regs)
else {
list_for_each_entry(h, &handlers[vector], list) {
if (h->func) {
h->func(h->priv == NULL ? regs : h->priv);
h->func(h->priv);
}
}
}
@ -697,18 +641,7 @@ static void __arm64_wakeup(int hw_cpuid, unsigned long entry)
/** IHK Functions **/
/* send WFI(Wait For Interrupt) instruction */
static inline void cpu_do_idle(void)
{
extern void __cpu_do_idle(void);
uint64_t retention;
int state;
state = pwr_arm64hpc_retention_state_get(&retention);
if ((state == 0) && (retention != 0)) {
pwr_arm64hpc_enable_retention_state();
}
__cpu_do_idle();
}
extern void cpu_do_idle(void);
/* halt by WFI(Wait For Interrupt) */
void cpu_halt(void)
@ -1182,32 +1115,20 @@ long ihk_mc_show_cpuinfo(char *buf, size_t buf_size, unsigned long read_off, int
int j = 0;
/* generate strings */
loff += scnprintf(lbuf + loff, lbuf_size - loff,
"processor\t: %d\n", cpuinfo->hwid);
loff += scnprintf(lbuf + loff, lbuf_size - loff, "Features\t:");
loff += snprintf(lbuf + loff, lbuf_size - loff, "processor\t: %d\n", cpuinfo->hwid);
loff += snprintf(lbuf + loff, lbuf_size - loff, "Features\t:");
for (j = 0; hwcap_str[j]; j++) {
if (elf_hwcap & (1 << j)) {
loff += scnprintf(lbuf + loff,
lbuf_size - loff,
" %s", hwcap_str[j]);
loff += snprintf(lbuf + loff, lbuf_size - loff, " %s", hwcap_str[j]);
}
}
loff += scnprintf(lbuf + loff, lbuf_size - loff, "\n");
loff += scnprintf(lbuf + loff, lbuf_size - loff,
"CPU implementer\t: 0x%02x\n",
MIDR_IMPLEMENTOR(midr));
loff += scnprintf(lbuf + loff, lbuf_size - loff,
"CPU architecture: 8\n");
loff += scnprintf(lbuf + loff, lbuf_size - loff,
"CPU variant\t: 0x%x\n",
MIDR_VARIANT(midr));
loff += scnprintf(lbuf + loff, lbuf_size - loff,
"CPU part\t: 0x%03x\n",
MIDR_PARTNUM(midr));
loff += scnprintf(lbuf + loff, lbuf_size - loff,
"CPU revision\t: %d\n\n",
MIDR_REVISION(midr));
loff += snprintf(lbuf + loff, lbuf_size - loff, "\n");
loff += snprintf(lbuf + loff, lbuf_size - loff, "CPU implementer\t: 0x%02x\n", MIDR_IMPLEMENTOR(midr));
loff += snprintf(lbuf + loff, lbuf_size - loff, "CPU architecture: 8\n");
loff += snprintf(lbuf + loff, lbuf_size - loff, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
loff += snprintf(lbuf + loff, lbuf_size - loff, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
loff += snprintf(lbuf + loff, lbuf_size - loff, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
/* check buffer depletion */
if ((i < num_processors - 1) && ((lbuf_size - loff) == 1)) {
@ -1236,6 +1157,7 @@ err:
static int check_and_allocate_fp_regs(struct thread *thread);
void save_fp_regs(struct thread *thread);
#ifdef POSTK_DEBUG_ARCH_DEP_23 /* add arch dep. clone_thread() function */
void arch_clone_thread(struct thread *othread, unsigned long pc,
unsigned long sp, struct thread *nthread)
{
@ -1245,6 +1167,10 @@ void arch_clone_thread(struct thread *othread, unsigned long pc,
asm("mrs %0, tpidr_el0" : "=r" (tls));
othread->tlsblock_base = nthread->tlsblock_base = tls;
if ((othread->fp_regs != NULL) && (check_and_allocate_fp_regs(nthread) == 0)) {
memcpy(nthread->fp_regs, othread->fp_regs, sizeof(fp_regs_struct));
}
/* if SVE enable, takeover lower 128 bit register */
if (likely(elf_hwcap & HWCAP_SVE)) {
fp_regs_struct fp_regs;
@ -1254,6 +1180,7 @@ void arch_clone_thread(struct thread *othread, unsigned long pc,
thread_fpsimd_to_sve(nthread, &fp_regs);
}
}
#endif /* POSTK_DEBUG_ARCH_DEP_23 */
/*@
@ requires \valid(handler);
@ -1273,7 +1200,7 @@ void ihk_mc_delay_us(int us)
arch_delay(us);
}
void arch_print_stack(void)
void arch_print_stack()
{
}
@ -1309,11 +1236,6 @@ void arch_show_interrupt_context(const void *reg)
kprintf(" syscallno : %016lx\n", regs->syscallno);
}
/* Stop this CPU by powering it off through the PSCI CPU_OFF call. */
void arch_cpu_stop(void)
{
psci_cpu_off();
}
/*@
@ behavior fs_base:
@ assumes type == IHK_ASR_X86_FS;
@ -1494,10 +1416,6 @@ out:
void
save_fp_regs(struct thread *thread)
{
if (thread == &cpu_local_var(idle)) {
return;
}
if (likely(elf_hwcap & (HWCAP_FP | HWCAP_ASIMD))) {
if (check_and_allocate_fp_regs(thread) != 0) {
// alloc error.
@ -1558,6 +1476,98 @@ restore_fp_regs(struct thread *thread)
}
}
/*
 * Arm the per-CPU generic timer to fire after "clocks" ticks and record
 * the value in per_cpu_timer_val[] so the timer interrupt handler can
 * re-arm it periodically. Uses the virtual (CNTV) or physical (CNTP)
 * timer according to is_use_virt_timer().
 */
void
lapic_timer_enable(unsigned int clocks)
{
unsigned int val = 0;
/* Build control value: clear status/mask, set enable.
 * NOTE(review): the base value is read from cntp_ctl_el0 even on the
 * virtual-timer path — presumably intended to be cntv_ctl_el0 there;
 * confirm against lapic_timer_disable(), which does the same. */
asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
val &= ~(ARCH_TIMER_CTRL_IT_STAT | ARCH_TIMER_CTRL_IT_MASK);
val |= ARCH_TIMER_CTRL_ENABLE;
if (is_use_virt_timer()) {
asm volatile("msr cntv_tval_el0, %0" : : "r" (clocks));
asm volatile("msr cntv_ctl_el0, %0" : : "r" (val));
} else {
asm volatile("msr cntp_tval_el0, %0" : : "r" (clocks));
asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
}
/* Remember the period for re-arming in the timer interrupt handlers. */
per_cpu_timer_val[ihk_mc_get_processor_id()] = clocks;
}
/*
 * Report a page fault that could not be resolved: log the faulting
 * address, whether it lies inside a mapped VM range (dumping the PTE if
 * so), show the interrupt context, and panic if the fault came from
 * kernel mode. User-mode faults return to the caller for signaling.
 *
 * NOTE(review): "error" is initialized to 0 and never assigned, so the
 * decoded message below always reads "no page found"/"read"/"kernel"/
 * "wasn't" — the decode logic appears to be carried over from x86 and
 * is effectively inert here.
 */
void
unhandled_page_fault(struct thread *thread, void *fault_addr, void *regs)
{
const uintptr_t address = (uintptr_t)fault_addr;
struct process_vm *vm = thread->vm;
struct vm_range *range;
unsigned long irqflags;
unsigned long error = 0;
/* Hold the kprintf lock so the multi-line report is not interleaved. */
irqflags = kprintf_lock();
__kprintf("Page fault for 0x%lx\n", address);
__kprintf("%s for %s access in %s mode (reserved bit %s set), "
"it %s an instruction fetch\n",
(error & PF_PROT ? "protection fault" : "no page found"),
(error & PF_WRITE ? "write" : "read"),
(error & PF_USER ? "user" : "kernel"),
(error & PF_RSVD ? "was" : "wasn't"),
(error & PF_INSTR ? "was" : "wasn't"));
range = lookup_process_memory_range(vm, address, address+1);
if (range) {
__kprintf("address is in range, flag: 0x%lx\n",
range->flag);
ihk_mc_pt_print_pte(vm->address_space->page_table, (void*)address);
} else {
__kprintf("address is out of range! \n");
}
kprintf_unlock(irqflags);
/* TODO */
ihk_mc_debug_show_interrupt_context(regs);
/* A kernel-mode fault is unrecoverable. */
if (!interrupt_from_user(regs)) {
panic("panic: kernel mode PF");
}
//dkprintf("now dump a core file\n");
//coredump(proc, regs);
#ifdef DEBUG_PRINT_MEM
{
uint64_t *sp = (void *)REGS_GET_STACK_POINTER(regs);
kprintf("*rsp:%lx,*rsp+8:%lx,*rsp+16:%lx,*rsp+24:%lx,\n",
sp[0], sp[1], sp[2], sp[3]);
}
#endif
return;
}
/*
 * Disable the per-CPU generic timer (virtual or physical, per
 * is_use_virt_timer()) and clear the recorded period so the interrupt
 * handlers stop re-arming it.
 */
void
lapic_timer_disable()
{
unsigned int zero = 0;
unsigned int val = 0;
/* Build control value with status/mask/enable all cleared.
 * NOTE(review): reads cntp_ctl_el0 even on the virtual-timer path —
 * mirrors lapic_timer_enable(); confirm whether cntv_ctl_el0 was
 * intended there. */
asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
val &= ~(ARCH_TIMER_CTRL_IT_STAT | ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE);
if (is_use_virt_timer()) {
asm volatile("msr cntv_ctl_el0, %0" : : "r" (val));
asm volatile("msr cntv_tval_el0, %0" : : "r" (zero));
} else {
asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
asm volatile("msr cntp_tval_el0, %0" : : "r" (zero));
}
per_cpu_timer_val[ihk_mc_get_processor_id()] = 0;
}
void init_tick(void)
{
dkprintf("init_tick():\n");
@ -1586,174 +1596,25 @@ void arch_start_pvclock(void)
/* Invoke func directly; nmi_ctx is unused in this implementation
 * (presumably the x86 version manipulates the NMI context — confirm). */
void
mod_nmi_ctx(void *nmi_ctx, void (*func)())
{
func();
}
extern void freeze(void);
/* Thin wrapper that forwards to freeze(). */
void __freeze(void)
{
freeze();
}
/* Expand to one switch case that reads the given implementation-defined
 * system register (via the mrs_s encoding helper) into *val. */
#define SYSREG_READ_S(sys_reg) case (sys_reg): asm volatile("mrs_s %0, " __stringify(sys_reg) : "=r" (*val)); break

/*
 * Read a whitelisted FUJITSU implementation-defined system register.
 *
 * @sys_reg  encoded system-register id (must be one of the cases below)
 * @val      out: register value on success
 *
 * Returns 0 on success, -EINVAL if @sys_reg is not in the whitelist.
 */
static inline int arch_cpu_mrs(uint32_t sys_reg, uint64_t *val)
{
	int ret = 0;

	switch (sys_reg) {
	SYSREG_READ_S(IMP_FJ_TAG_ADDRESS_CTRL_EL1);
	SYSREG_READ_S(IMP_SCCR_CTRL_EL1);
	SYSREG_READ_S(IMP_SCCR_ASSIGN_EL1);
	SYSREG_READ_S(IMP_SCCR_SET0_L2_EL1);
	SYSREG_READ_S(IMP_SCCR_SET1_L2_EL1);
	SYSREG_READ_S(IMP_SCCR_L1_EL0);
	SYSREG_READ_S(IMP_PF_CTRL_EL1);
	SYSREG_READ_S(IMP_PF_STREAM_DETECT_CTRL_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_CTRL0_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_CTRL1_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_CTRL2_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_CTRL3_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_CTRL4_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_CTRL5_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_CTRL6_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_CTRL7_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_DISTANCE0_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_DISTANCE1_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_DISTANCE2_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_DISTANCE3_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_DISTANCE4_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_DISTANCE5_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_DISTANCE6_EL0);
	SYSREG_READ_S(IMP_PF_INJECTION_DISTANCE7_EL0);
	SYSREG_READ_S(IMP_BARRIER_CTRL_EL1);
	SYSREG_READ_S(IMP_BARRIER_BST_BIT_EL1);
	SYSREG_READ_S(IMP_BARRIER_INIT_SYNC_BB0_EL1);
	SYSREG_READ_S(IMP_BARRIER_INIT_SYNC_BB1_EL1);
	SYSREG_READ_S(IMP_BARRIER_INIT_SYNC_BB2_EL1);
	SYSREG_READ_S(IMP_BARRIER_INIT_SYNC_BB3_EL1);
	SYSREG_READ_S(IMP_BARRIER_INIT_SYNC_BB4_EL1);
	SYSREG_READ_S(IMP_BARRIER_INIT_SYNC_BB5_EL1);
	SYSREG_READ_S(IMP_BARRIER_ASSIGN_SYNC_W0_EL1);
	SYSREG_READ_S(IMP_BARRIER_ASSIGN_SYNC_W1_EL1);
	SYSREG_READ_S(IMP_BARRIER_ASSIGN_SYNC_W2_EL1);
	SYSREG_READ_S(IMP_BARRIER_ASSIGN_SYNC_W3_EL1);
	SYSREG_READ_S(IMP_SOC_STANDBY_CTRL_EL1);
	SYSREG_READ_S(IMP_FJ_CORE_UARCH_CTRL_EL2);
	SYSREG_READ_S(IMP_FJ_CORE_UARCH_RESTRECTION_EL1);
	/* every case above ends in break; anything unlisted is rejected */
	default:
		return -EINVAL;
	}
	return ret;
}
/*
 * Read a CPU register described by @desc into desc->val.
 *
 * Memory-mapped registers (desc->addr) are not supported and panic.
 * System registers are selected via desc->addr_ext and read through
 * arch_cpu_mrs(); desc->val is only updated on success.
 *
 * Returns 0 on success, -EINVAL for an empty descriptor, or the error
 * from arch_cpu_mrs().
 */
static inline int arch_cpu_read_register(struct ihk_os_cpu_register *desc)
{
	uint64_t tmp;
	int err;

	if (desc->addr) {
		panic("memory mapped register is not supported.\n");
		return 0;
	}
	if (!desc->addr_ext) {
		return -EINVAL;
	}
	err = arch_cpu_mrs(desc->addr_ext, &tmp);
	if (err == 0) {
		desc->val = tmp;
	}
	return err;
}
/* Expand to one switch case that writes @val to the given
 * implementation-defined system register (via the msr_s encoding helper). */
#define SYSREG_WRITE_S(sys_reg) case (sys_reg): asm volatile("msr_s " __stringify(sys_reg) ", %0" :: "r" (val)); break

/*
 * Write a whitelisted FUJITSU implementation-defined system register.
 *
 * @sys_reg  encoded system-register id (must be one of the cases below)
 * @val      value to write
 *
 * IMP_SOC_STANDBY_CTRL_EL1 is handled as a special case: after the write,
 * if the MODE_CHANGE bit was requested, the CPU executes wfe() to enter
 * the standby state.
 *
 * Returns 0 on success, -EINVAL if @sys_reg is not in the whitelist.
 */
static inline int arch_cpu_msr(uint32_t sys_reg, uint64_t val)
{
	int ret = 0;

	switch (sys_reg) {
	SYSREG_WRITE_S(IMP_FJ_TAG_ADDRESS_CTRL_EL1);
	SYSREG_WRITE_S(IMP_SCCR_CTRL_EL1);
	SYSREG_WRITE_S(IMP_SCCR_ASSIGN_EL1);
	SYSREG_WRITE_S(IMP_SCCR_SET0_L2_EL1);
	SYSREG_WRITE_S(IMP_SCCR_SET1_L2_EL1);
	SYSREG_WRITE_S(IMP_SCCR_L1_EL0);
	SYSREG_WRITE_S(IMP_PF_CTRL_EL1);
	SYSREG_WRITE_S(IMP_PF_STREAM_DETECT_CTRL_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_CTRL0_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_CTRL1_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_CTRL2_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_CTRL3_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_CTRL4_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_CTRL5_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_CTRL6_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_CTRL7_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_DISTANCE0_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_DISTANCE1_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_DISTANCE2_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_DISTANCE3_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_DISTANCE4_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_DISTANCE5_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_DISTANCE6_EL0);
	SYSREG_WRITE_S(IMP_PF_INJECTION_DISTANCE7_EL0);
	SYSREG_WRITE_S(IMP_BARRIER_CTRL_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_BST_BIT_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_INIT_SYNC_BB0_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_INIT_SYNC_BB1_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_INIT_SYNC_BB2_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_INIT_SYNC_BB3_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_INIT_SYNC_BB4_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_INIT_SYNC_BB5_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_ASSIGN_SYNC_W0_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_ASSIGN_SYNC_W1_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_ASSIGN_SYNC_W2_EL1);
	SYSREG_WRITE_S(IMP_BARRIER_ASSIGN_SYNC_W3_EL1);
	SYSREG_WRITE_S(IMP_FJ_CORE_UARCH_CTRL_EL2);
	SYSREG_WRITE_S(IMP_FJ_CORE_UARCH_RESTRECTION_EL1);
	/* each case above breaks; the standby register needs extra handling */
	case IMP_SOC_STANDBY_CTRL_EL1:
		asm volatile("msr_s " __stringify(IMP_SOC_STANDBY_CTRL_EL1) ", %0"
			: : "r" (val) : "memory");
		if (val & IMP_SOC_STANDBY_CTRL_EL1_MODE_CHANGE) {
			wfe();
		}
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
/*
 * Write a CPU register described by @desc from desc->val.
 *
 * Memory-mapped registers (desc->addr) are not supported and panic.
 * System registers are selected via desc->addr_ext and written through
 * arch_cpu_msr().
 *
 * Returns 0 on success, -EINVAL for an empty descriptor, or the error
 * from arch_cpu_msr().
 */
static inline int arch_cpu_write_register(struct ihk_os_cpu_register *desc)
{
	if (desc->addr) {
		panic("memory mapped register is not supported.\n");
		return 0;
	}
	if (desc->addr_ext) {
		return arch_cpu_msr(desc->addr_ext, desc->val);
	}
	return -EINVAL;
	/* TODO: skeleton for rusage */
}
/*
 * Dispatch a read or write of a CPU register on behalf of the host.
 *
 * @desc  register descriptor (selector and value)
 * @op    MCCTRL_OS_CPU_READ_REGISTER or MCCTRL_OS_CPU_WRITE_REGISTER
 *
 * Returns the result of the underlying accessor, or -1 for an unknown @op.
 *
 * Fix: removed the unreachable "return 0;" that followed "return ret;",
 * and the redundant "ret = -1; return -1;" pair in the unknown-op branch.
 * Behavior is unchanged for all callers.
 */
int arch_cpu_read_write_register(
		struct ihk_os_cpu_register *desc,
		enum mcctrl_os_cpu_operation op)
{
	int ret;

	/* TODO: skeleton for patch:0676 */
	if (op == MCCTRL_OS_CPU_READ_REGISTER) {
		ret = arch_cpu_read_register(desc);
	}
	else if (op == MCCTRL_OS_CPU_WRITE_REGISTER) {
		ret = arch_cpu_write_register(desc);
	}
	else {
		ret = -1;
	}
	return ret;
}
int smp_call_func(cpu_set_t *__cpu_set, smp_func_t __func, void *__arg)

View File

@ -1,4 +1,4 @@
/* cpufeature.c COPYRIGHT FUJITSU LIMITED 2017-2018 */
/* cpufeature.c COPYRIGHT FUJITSU LIMITED 2017 */
#include <cpufeature.h>
#include <ihk/debug.h>
@ -10,7 +10,9 @@
#include <ptrace.h>
#include <hwcap.h>
#ifdef POSTK_DEBUG_ARCH_DEP_65
unsigned long elf_hwcap;
#endif /* POSTK_DEBUG_ARCH_DEP_65 */
/* @ref.impl arch/arm64/kernel/cpufeature.c */
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
@ -52,19 +54,6 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
ARM64_FTR_END,
};
/* @ref.impl linux4.16.0 arch/arm64/kernel/cpufeature.c */
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
/* @ref.impl arch/arm64/kernel/cpufeature.c */
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
@ -315,7 +304,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 6 */
ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_raz),
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
@ -1008,7 +997,9 @@ void setup_cpu_features(void)
setup_elf_hwcaps(arm64_elf_hwcaps);
}
#ifdef POSTK_DEBUG_ARCH_DEP_65
unsigned long arch_get_hwcap(void)
{
return elf_hwcap;
}
#endif /* POSTK_DEBUG_ARCH_DEP_65 */

View File

@ -1,11 +1,10 @@
/* entry.S COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* entry.S COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <linkage.h>
#include <assembler.h>
#include <asm-offsets.h>
#include <esr.h>
#include <thread_info.h>
#include <asm-syscall.h>
/*
* Bad Abort numbers
@ -78,20 +77,16 @@
.macro kernel_exit, el, need_enable_step = 0
.if \el == 0
bl check_sig_pending
bl check_need_resched // or reschedule is needed.
mov x0, #0
mov x1, sp
mov x2, #0
bl check_signal // check whether the signal is delivered
bl check_need_resched // or reschedule is needed.
mov x0, #0
mov x1, sp
mov x2, #0
bl check_signal_irq_disabled // check whether the signal is delivered(for kernel_exit)
.endif
.if \el == 1
bl check_sig_pending
.endif
disable_irq x1 // disable interrupts
.if \need_enable_step == 1
ldr x1, [tsk, #TI_FLAGS]
@ -372,12 +367,7 @@ el0_sync:
b el0_inv
el0_svc:
uxtw scno, w8 // syscall number in w8
cmp scno, #__NR_rt_sigreturn
b.eq 1f
str x0, [sp, #S_ORIG_X0] // save the original x0
ldr x16, [sp, #S_PC]
str x16, [sp, #S_ORIG_PC] // save the original pc
1: str scno, [sp, #S_SYSCALLNO] // save syscall number
stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
enable_nmi
enable_dbg_and_irq x0
adrp x16, __arm64_syscall_handler
@ -560,7 +550,9 @@ ENTRY(ret_from_fork)
blr x19
1: get_thread_info tsk
bl release_runq_lock
bl utilthr_migrate
b ret_to_user
ENDPROC(ret_from_fork)
/* TODO: skeleton for rusage */
ENTRY(__freeze)
ENDPROC(__freeze)

View File

@ -1,4 +1,4 @@
/* fault.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* fault.c COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <ihk/context.h>
#include <ihk/debug.h>
@ -13,6 +13,7 @@
unsigned long __page_fault_handler_address;
extern int interrupt_from_user(void *);
void set_signal(int sig, void *regs, struct siginfo *info);
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs);
static int do_page_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs);
static int do_translation_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs);
@ -104,13 +105,12 @@ void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
const struct fault_info *inf = fault_info + (esr & 63);
struct siginfo info;
const int from_user = interrupt_from_user(regs);
/* set_cputime called in inf->fn() */
if (!inf->fn(addr, esr, regs))
return;
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
kprintf("Unhandled fault: %s (0x%08x) at 0x%016lx\n", inf->name, esr, addr);
info.si_signo = inf->sig;
info.si_errno = 0;
@ -118,7 +118,7 @@ void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
info._sifields._sigfault.si_addr = (void*)addr;
arm64_notify_die("", regs, &info, esr);
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(0);
}
/*
@ -127,24 +127,21 @@ void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct siginfo info;
const int from_user = interrupt_from_user(regs);
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info._sifields._sigfault.si_addr = (void*)addr;
arm64_notify_die("", regs, &info, esr);
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(0);
}
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct siginfo info;
const int from_user = interrupt_from_user(regs);
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs) ? 1: 2);
/*
* If we are in kernel mode at this point, we have no context to
* handle this fault with.
@ -166,7 +163,7 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
(addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request", addr);
panic("OOps.");
}
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(0);
}
static int is_el0_instruction_abort(unsigned int esr)
@ -195,7 +192,6 @@ static int do_page_fault(unsigned long addr, unsigned int esr,
}
}
/* set_cputime() call in page_fault_handler() */
page_fault_handler = (void *)__page_fault_handler_address;
(*page_fault_handler)((void *)addr, reason, regs);
@ -256,10 +252,10 @@ int do_debug_exception(unsigned long addr, unsigned int esr, struct pt_regs *reg
{
const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
struct siginfo info;
const int from_user = interrupt_from_user(regs);
int from_user = interrupt_from_user(regs);
int ret = -1;
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(from_user ? 1: 2);
if (!inf->fn(addr, esr, regs)) {
ret = 1;
@ -278,7 +274,7 @@ int do_debug_exception(unsigned long addr, unsigned int esr, struct pt_regs *reg
ret = 0;
out:
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(0);
return ret;
}
@ -287,9 +283,7 @@ out:
*/
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
const int from_user = interrupt_from_user(regs);
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(interrupt_from_user(regs) ? 1: 2);
set_cputime(0);
return 1;
}

View File

@ -1,4 +1,4 @@
/* fpsimd.c COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* fpsimd.c COPYRIGHT FUJITSU LIMITED 2016-2017 */
#include <thread_info.h>
#include <fpsimd.h>
#include <cpuinfo.h>

470
arch/arm64/kernel/gencore.c Normal file
View File

@ -0,0 +1,470 @@
/* gencore.c COPYRIGHT FUJITSU LIMITED 2015-2016 */
#ifndef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
#include <ihk/debug.h>
#include <kmalloc.h>
#include <cls.h>
#include <list.h>
#include <process.h>
#include <string.h>
#include <elfcore.h>
#include <debug.h>
#define align32(x) ((((x) + 3) / 4) * 4)
#define alignpage(x) ((((x) + (PAGE_SIZE) - 1) / (PAGE_SIZE)) * (PAGE_SIZE))
//#define DEBUG_PRINT_GENCORE
#ifdef DEBUG_PRINT_GENCORE
#undef DDEBUG_DEFAULT
#define DDEBUG_DEFAULT DDEBUG_PRINT
#endif
/*
 * Generate a core file image, which consists of many chunks.
 * Returns an allocated table, an entry of which is a pair of the address
 * of a chunk and its length.
 */
/**
* \brief Fill the elf header.
*
* \param eh An Elf64_Ehdr structure.
* \param segs Number of segments of the core file.
*/
/**
 * \brief Fill the elf header.
 *
 * \param eh An Elf64_Ehdr structure.
 * \param segs Number of segments of the core file.
 *
 * Produces an ELF64 little-endian ET_CORE header with a fixed-size
 * program header table immediately after it (e_phoff = e_ehsize = 64)
 * and no section headers.
 */
void fill_elf_header(Elf64_Ehdr *eh, int segs)
{
	/* standard ELF identification bytes */
	eh->e_ident[EI_MAG0] = 0x7f;
	eh->e_ident[EI_MAG1] = 'E';
	eh->e_ident[EI_MAG2] = 'L';
	eh->e_ident[EI_MAG3] = 'F';
	eh->e_ident[EI_CLASS] = ELFCLASS64;
	eh->e_ident[EI_DATA] = ELFDATA2LSB;
	eh->e_ident[EI_VERSION] = El_VERSION;
	eh->e_ident[EI_OSABI] = ELFOSABI_NONE;
	eh->e_ident[EI_ABIVERSION] = El_ABIVERSION_NONE;
	eh->e_type = ET_CORE;
	/*
	 * NOTE(review): this file lives under arch/arm64 but stamps an x86
	 * machine type (EM_K10M / EM_X86_64); an arm64 core should likely use
	 * EM_AARCH64 — confirm against the consumers of this core image.
	 */
#ifdef CONFIG_MIC
	eh->e_machine = EM_K10M;
#else
	eh->e_machine = EM_X86_64;
#endif
	eh->e_version = EV_CURRENT;
	eh->e_entry = 0; /* Do we really need this? */
	eh->e_phoff = 64; /* fixed */
	eh->e_shoff = 0; /* no section header */
	eh->e_flags = 0;
	eh->e_ehsize = 64; /* fixed */
	eh->e_phentsize = 56; /* fixed */
	eh->e_phnum = segs;
	eh->e_shentsize = 0;
	eh->e_shnum = 0;
	eh->e_shstrndx = 0;
}
/**
* \brief Return the size of the prstatus entry of the NOTE segment.
*
*/
/* Size in bytes of the prstatus entry of the NOTE segment:
 * note header + 4-byte-aligned name ("CORE") + aligned payload. */
int get_prstatus_size(void)
{
	int size = sizeof(struct note);

	size += align32(sizeof("CORE"));
	size += align32(sizeof(struct elf_prstatus64));
	return size;
}
/**
* \brief Fill a prstatus structure.
*
* \param head A pointer to a note structure.
* \param thread A pointer to the current thread structure.
* \param regs0 A pointer to a x86_regs structure.
*/
/**
 * \brief Fill a prstatus structure.
 *
 * \param head A pointer to a note structure.
 * \param thread A pointer to the current thread structure.
 * \param regs0 A pointer to a x86_regs structure.
 *
 * Currently a stub on this architecture: the prstatus area is left as
 * the zero-filled bytes prepared by the caller (see gencore()'s memset).
 */
void fill_prstatus(struct note *head, struct thread *thread, void *regs0)
{
	/* TODO(pka_idle) */
}
/**
* \brief Return the size of the prpsinfo entry of the NOTE segment.
*
*/
/* Size in bytes of the prpsinfo entry of the NOTE segment:
 * note header + 4-byte-aligned name ("CORE") + aligned payload. */
int get_prpsinfo_size(void)
{
	int size = sizeof(struct note);

	size += align32(sizeof("CORE"));
	size += align32(sizeof(struct elf_prpsinfo64));
	return size;
}
/**
* \brief Fill a prpsinfo structure.
*
* \param head A pointer to a note structure.
* \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure.
*/
/**
 * \brief Fill a prpsinfo structure.
 *
 * \param head A pointer to a note structure.
 * \param thread A pointer to the current thread structure.
 * \param regs A pointer to a x86_regs structure (unused here).
 *
 * Lays out: note header, the name "CORE", then an elf_prpsinfo64 payload
 * at the 4-byte-aligned offset past the name.  Only pr_state and pr_pid
 * are filled; all other fields stay zero.
 */
void fill_prpsinfo(struct note *head, struct thread *thread, void *regs)
{
	struct elf_prpsinfo64 *info;
	void *name_area;

	head->namesz = sizeof("CORE");
	head->descsz = sizeof(struct elf_prpsinfo64);
	head->type = NT_PRPSINFO;

	/* the note name immediately follows the header */
	name_area = (void *)(head + 1);
	memcpy(name_area, "CORE", sizeof("CORE"));

	/* payload starts at the aligned offset after the name */
	info = (struct elf_prpsinfo64 *)(name_area + align32(sizeof("CORE")));
	info->pr_state = thread->status;
	info->pr_pid = thread->proc->pid;
	/*
	 * The remaining fields are intentionally left zeroed:
	 * pr_sname, pr_zomb, pr_nice, pr_flag, pr_uid, pr_gid,
	 * pr_ppid, pr_pgrp, pr_sid, pr_fname, pr_psargs.
	 */
}
/**
* \brief Return the size of the AUXV entry of the NOTE segment.
*
*/
/* Size in bytes of the AUXV entry of the NOTE segment:
 * note header + 4-byte-aligned name ("CORE") + AUXV_LEN longs. */
int get_auxv_size(void)
{
	int size = sizeof(struct note);

	size += align32(sizeof("CORE"));
	size += sizeof(unsigned long) * AUXV_LEN;
	return size;
}
/**
* \brief Fill an AUXV structure.
*
* \param head A pointer to a note structure.
* \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure.
*/
/**
 * \brief Fill an AUXV structure.
 *
 * \param head A pointer to a note structure.
 * \param thread A pointer to the current thread structure.
 * \param regs A pointer to a x86_regs structure (unused here).
 *
 * Copies the process' saved auxiliary vector into an NT_AUXV note,
 * laid out as: note header, the name "CORE", then AUXV_LEN longs.
 */
void fill_auxv(struct note *head, struct thread *thread, void *regs)
{
	void *name_area;
	void *payload;

	head->namesz = sizeof("CORE");
	head->descsz = sizeof(unsigned long) * AUXV_LEN;
	head->type = NT_AUXV;

	/* note name immediately follows the header */
	name_area = (void *)(head + 1);
	memcpy(name_area, "CORE", sizeof("CORE"));

	/* auxv payload starts at the aligned offset after the name */
	payload = name_area + align32(sizeof("CORE"));
	memcpy(payload, thread->proc->saved_auxv,
	       sizeof(unsigned long) * AUXV_LEN);
}
/**
* \brief Return the size of the whole NOTE segment.
*
*/
/* Total size in bytes of the NOTE segment: prstatus + prpsinfo + auxv. */
int get_note_size(void)
{
	int total = get_auxv_size();

	total += get_prpsinfo_size();
	total += get_prstatus_size();
	return total;
}
/**
* \brief Fill the NOTE segment.
*
* \param head A pointer to a note structure.
* \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure.
*/
/**
 * \brief Fill the NOTE segment.
 *
 * \param note A pointer to the start of the NOTE area.
 * \param thread A pointer to the current thread structure.
 * \param regs A pointer to a x86_regs structure.
 *
 * Writes the three note entries back to back, in the same order their
 * sizes are accounted for by get_note_size(): prstatus, prpsinfo, auxv.
 */
void fill_note(void *note, struct thread *thread, void *regs)
{
	void *cursor = note;

	fill_prstatus(cursor, thread, regs);
	cursor += get_prstatus_size();
	fill_prpsinfo(cursor, thread, regs);
	cursor += get_prpsinfo_size();
	fill_auxv(cursor, thread, regs);
}
/**
* \brief Generate an image of the core file.
*
* \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure.
* \param coretable(out) An array of core chunks.
* \param chunks(out) Number of the entires of coretable.
*
* A core chunk is represented by a pair of a physical
* address of memory region and its size. If there are
* no corresponding physical address for a VM area
* (an unallocated demand-paging page, e.g.), the address
* should be zero.
*/
/*
 * Build the in-memory image of a core file for @thread.
 *
 * The image is described as an array of "chunks" (physical address +
 * length pairs) returned via @coretable/@chunks; a chunk with address 0
 * stands for a region with no backing page (demand paging hole).
 * Layout: ELF header, program header table, NOTE segment (padded so the
 * first LOAD segment is page aligned), then one LOAD per VM range.
 *
 * Returns 0 on success, -1 on failure (nothing is leaked on failure).
 *
 * NOTE(review): ct[0] holds virt_to_phys(&eh) where eh is a local on this
 * stack frame — the chunk is only valid while this frame is live; confirm
 * the caller consumes the image before this function's frame is reused.
 */
int gencore(struct thread *thread, void *regs,
	    struct coretable **coretable, int *chunks)
{
	struct coretable *ct = NULL;
	Elf64_Ehdr eh;
	Elf64_Phdr *ph = NULL;
	void *note = NULL;
	struct vm_range *range, *next;
	struct process_vm *vm = thread->vm;
	int segs = 1; /* the first one is for NOTE */
	int notesize, phsize, alignednotesize;
	unsigned int offset = 0;
	int i;

	*chunks = 3; /* ELF header, program header table and NOTE segment */

	if (vm == NULL) {
		dkprintf("no vm found.\n");
		return -1;
	}

	/*
	 * Pass 1: count segments and chunks.  Demand-paged ranges get one
	 * chunk per present page plus one zero chunk per run of absent
	 * pages; every other range is a single chunk.
	 */
	next = lookup_process_memory_range(vm, 0, -1);
	while ((range = next)) {
		next = next_process_memory_range(vm, range);

		dkprintf("start:%lx end:%lx flag:%lx objoff:%lx\n",
			 range->start, range->end, range->flag, range->objoff);

		/* We omit reserved areas because they are only for
		   mckernel's internal use. */
		if (range->flag & VR_RESERVED)
			continue;

		/* We need a chunk for each page for a demand paging area.
		   This can be optimized for spatial complexity but we would
		   lose simplicity instead. */
		if (range->flag & VR_DEMAND_PAGING) {
			unsigned long p, phys;
			int prevzero = 0;

			for (p = range->start; p < range->end; p += PAGE_SIZE) {
				if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
							   (void *)p, &phys) != 0) {
					/* absent page: extend the hole */
					prevzero = 1;
				} else {
					/* close a preceding hole, count this page */
					if (prevzero == 1)
						(*chunks)++;
					(*chunks)++;
					prevzero = 0;
				}
			}
			/* a hole may run to the end of the range */
			if (prevzero == 1)
				(*chunks)++;
		} else {
			(*chunks)++;
		}
		segs++;
	}
	dkprintf("we have %d segs and %d chunks.\n\n", segs, *chunks);

	{
		struct vm_regions region = thread->vm->region;

		dkprintf("text: %lx-%lx\n", region.text_start, region.text_end);
		dkprintf("data: %lx-%lx\n", region.data_start, region.data_end);
		dkprintf("brk: %lx-%lx\n", region.brk_start, region.brk_end);
		dkprintf("map: %lx-%lx\n", region.map_start, region.map_end);
		dkprintf("stack: %lx-%lx\n", region.stack_start, region.stack_end);
		dkprintf("user: %lx-%lx\n\n", region.user_start, region.user_end);
	}

	dkprintf("now generate a core file image\n");

	/* ELF header sits at file offset 0 */
	offset += sizeof(eh);
	fill_elf_header(&eh, segs);

	/* program header table */
	phsize = sizeof(Elf64_Phdr) * segs;
	ph = kmalloc(phsize, IHK_MC_AP_NOWAIT);
	if (ph == NULL) {
		dkprintf("could not alloc a program header table.\n");
		goto fail;
	}
	memset(ph, 0, phsize);
	offset += phsize;

	/* NOTE segment
	 * To align the next segment page-sized, we prepare a padded
	 * region for our NOTE segment.
	 */
	notesize = get_note_size();
	alignednotesize = alignpage(notesize + offset) - offset;
	note = kmalloc(alignednotesize, IHK_MC_AP_NOWAIT);
	if (note == NULL) {
		dkprintf("could not alloc NOTE for core.\n");
		goto fail;
	}
	memset(note, 0, alignednotesize);
	fill_note(note, thread, regs);

	/* program header for NOTE segment is exceptional */
	ph[0].p_type = PT_NOTE;
	ph[0].p_flags = 0;
	ph[0].p_offset = offset;
	ph[0].p_vaddr = 0;
	ph[0].p_paddr = 0;
	ph[0].p_filesz = notesize;
	ph[0].p_memsz = notesize;
	ph[0].p_align = 0;
	offset += alignednotesize;

	/* program header for each memory chunk (one PT_LOAD per range) */
	i = 1;
	next = lookup_process_memory_range(vm, 0, -1);
	while ((range = next)) {
		next = next_process_memory_range(vm, range);
		unsigned long flag = range->flag;
		unsigned long size = range->end - range->start;

		if (range->flag & VR_RESERVED)
			continue;

		ph[i].p_type = PT_LOAD;
		/* translate VR_* protection bits into ELF PF_* bits */
		ph[i].p_flags = ((flag & VR_PROT_READ) ? PF_R : 0)
			| ((flag & VR_PROT_WRITE) ? PF_W : 0)
			| ((flag & VR_PROT_EXEC) ? PF_X : 0);
		ph[i].p_offset = offset;
		ph[i].p_vaddr = range->start;
		ph[i].p_paddr = 0;
		ph[i].p_filesz = size;
		ph[i].p_memsz = size;
		ph[i].p_align = PAGE_SIZE;
		i++;
		offset += size;
	}

	/* coretable to send to host */
	ct = kmalloc(sizeof(struct coretable) * (*chunks), IHK_MC_AP_NOWAIT);
	if (!ct) {
		dkprintf("could not alloc a coretable.\n");
		goto fail;
	}

	/* NOTE(review): hardcoded 64 matches the fixed e_ehsize above */
	ct[0].addr = virt_to_phys(&eh); /* ELF header */
	ct[0].len = 64;
	dkprintf("coretable[0]: %lx@%lx(%lx)\n", ct[0].len, ct[0].addr, &eh);

	ct[1].addr = virt_to_phys(ph); /* program header table */
	ct[1].len = phsize;
	dkprintf("coretable[1]: %lx@%lx(%lx)\n", ct[1].len, ct[1].addr, ph);

	ct[2].addr = virt_to_phys(note); /* NOTE segment */
	ct[2].len = alignednotesize;
	dkprintf("coretable[2]: %lx@%lx(%lx)\n", ct[2].len, ct[2].addr, note);

	/*
	 * Pass 2: emit memory chunks, mirroring the counting logic of
	 * pass 1 (the two passes must agree or the table overflows).
	 */
	i = 3; /* memory segments */
	next = lookup_process_memory_range(vm, 0, -1);
	while ((range = next)) {
		next = next_process_memory_range(vm, range);
		unsigned long phys;

		if (range->flag & VR_RESERVED)
			continue;

		if (range->flag & VR_DEMAND_PAGING) {
			/* Just an ad hoc kluge. */
			/* note: this 'phys' shadows the outer declaration */
			unsigned long p, start, phys;
			int prevzero = 0;
			unsigned long size = 0;

			for (start = p = range->start;
			     p < range->end; p += PAGE_SIZE) {
				if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
							   (void *)p, &phys) != 0) {
					if (prevzero == 0) {
						/* We begin a new chunk */
						size = PAGE_SIZE;
						start = p;
					} else {
						/* We extend the previous chunk */
						size += PAGE_SIZE;
					}
					prevzero = 1;
				} else {
					if (prevzero == 1) {
						/* Flush out an empty chunk */
						ct[i].addr = 0;
						ct[i].len = size;
						dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
							 ct[i].len, ct[i].addr, start);
						i++;
					}
					ct[i].addr = phys;
					ct[i].len = PAGE_SIZE;
					dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
						 ct[i].len, ct[i].addr, p);
					i++;
					prevzero = 0;
				}
			}
			if (prevzero == 1) {
				/* An empty chunk */
				ct[i].addr = 0;
				ct[i].len = size;
				dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
					 ct[i].len, ct[i].addr, start);
				i++;
			}
		} else {
			/* user ranges are translated through the page table;
			 * kernel-side ranges are direct-mapped */
			if ((thread->vm->region.user_start <= range->start) &&
			    (range->end <= thread->vm->region.user_end)) {
				if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
							   (void *)range->start, &phys) != 0) {
					dkprintf("could not convert user virtual address %lx"
						 "to physical address", range->start);
					goto fail;
				}
			} else {
				phys = virt_to_phys((void *)range->start);
			}
			ct[i].addr = phys;
			ct[i].len = range->end - range->start;
			dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
				 ct[i].len, ct[i].addr, range->start);
			i++;
		}
	}
	*coretable = ct;
	return 0;

fail:
	/* release everything allocated so far */
	if (ct)
		kfree(ct);
	if (ph)
		kfree(ph);
	if (note)
		kfree(note);
	return -1;
}
/**
* \brief Free all the allocated spaces for an image of the core file.
*
* \param coretable An array of core chunks.
*/
/**
 * \brief Free all the allocated spaces for an image of the core file.
 *
 * \param coretable An array of core chunks.
 *
 * Frees the NOTE segment (ct[2]) and program header table (ct[1]) that
 * gencore() kmalloc'ed, then the table itself.  ct[0] (the ELF header)
 * was a stack variable in gencore() and must not be freed here.
 */
void freecore(struct coretable **coretable)
{
	struct coretable *ct = *coretable;

	kfree(phys_to_virt(ct[2].addr)); /* NOTE segment */
	kfree(phys_to_virt(ct[1].addr)); /* ph */
	kfree(*coretable);
}
#endif /* !POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -1,4 +1,4 @@
/* head.S COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* head.S COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <linkage.h>
#include <ptrace.h>
@ -11,6 +11,8 @@
#include <arm-gic-v3.h>
#define KERNEL_RAM_VADDR MAP_KERNEL_START
#define EARLY_ALLOC_VADDR MAP_EARLY_ALLOC
#define BOOT_PARAM_VADDR MAP_BOOT_PARAM
//#ifndef CONFIG_SMP
//# define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF
@ -48,6 +50,16 @@
add \ttb0, \ttb0, \virt_to_phys
.endm
#ifdef CONFIG_ARM64_64K_PAGES
# define BLOCK_SHIFT PAGE_SHIFT
# define BLOCK_SIZE PAGE_SIZE
# define TABLE_SHIFT PMD_SHIFT
#else
# define BLOCK_SHIFT SECTION_SHIFT
# define BLOCK_SIZE SECTION_SIZE
# define TABLE_SHIFT PUD_SHIFT
#endif
#define KERNEL_START KERNEL_RAM_VADDR
#define KERNEL_END _end
@ -75,7 +87,6 @@
#define TRAMPOLINE_DATA_CPU_MAP_SIZE_SIZE 0x08
#define TRAMPOLINE_DATA_CPU_MAP_SIZE (NR_CPUS * 8)
#define TRAMPOLINE_DATA_DATA_RDISTS_PA_SIZE (NR_CPUS * 8)
#define TRAMPOLINE_DATA_RETENTION_STATE_FLAG_PA_SIZE 0x08
#define TRAMPOLINE_DATA_NR_PMU_AFFI_SIZE 0x04
#define TRAMPOLINE_DATA_PMU_AFF_SIZE (CONFIG_SMP_MAX_CORES * 4)
@ -94,9 +105,9 @@
.globl ihk_param_gic_percpu_offset, ihk_param_gic_version
.globl ihk_param_lpj, ihk_param_hz, ihk_param_psci_method
.globl ihk_param_cpu_logical_map, ihk_param_gic_rdist_base_pa
.globl ihk_param_pmu_irq_affi, ihk_param_nr_pmu_irq_affi
.globl ihk_param_pmu_irq_affiniry, ihk_param_nr_pmu_irq_affiniry
.globl ihk_param_use_virt_timer, ihk_param_evtstrm_timer_rate
.globl ihk_param_retention_state_flag_pa, ihk_param_default_vl
.globl ihk_param_default_vl
ihk_param_head:
ihk_param_param_addr:
.quad 0
@ -134,11 +145,9 @@ ihk_param_cpu_logical_map:
.skip NR_CPUS * 8 /* array of the MPIDR and the core number */
ihk_param_gic_rdist_base_pa:
.skip NR_CPUS * 8 /* per-cpu re-distributer PA */
ihk_param_retention_state_flag_pa:
.quad 0
ihk_param_pmu_irq_affi:
ihk_param_pmu_irq_affiniry:
.skip CONFIG_SMP_MAX_CORES * 4 /* array of the pmu affinity list */
ihk_param_nr_pmu_irq_affi:
ihk_param_nr_pmu_irq_affiniry:
.word 0 /* number of pmu affinity list elements. */
/* @ref.impl arch/arm64/include/asm/kvm_arm.h */
@ -256,17 +265,13 @@ ENTRY(arch_start)
mov x16, #NR_CPUS /* calc next data */
lsl x16, x16, 3
add x0, x0, x16
/* retention_state_flag_pa */
ldr x16, [x0], #TRAMPOLINE_DATA_RETENTION_STATE_FLAG_PA_SIZE
adr x15, ihk_param_retention_state_flag_pa
str x16, [x15]
/* nr_pmu_irq_affi */
/* nr_pmu_irq_affiniry */
ldr w16, [x0], #TRAMPOLINE_DATA_NR_PMU_AFFI_SIZE
adr x15, ihk_param_nr_pmu_irq_affi
adr x15, ihk_param_nr_pmu_irq_affiniry
str w16, [x15]
/* pmu_irq_affi */
/* pmu_irq_affiniry */
mov x18, x0
adr x15, ihk_param_pmu_irq_affi
adr x15, ihk_param_pmu_irq_affiniry
b 2f
1: ldr w17, [x18], #4
str w17, [x15], #4
@ -405,17 +410,14 @@ __create_page_tables:
* Map the early_alloc_pages area, kernel_img next block
*/
ldr x3, =KERNEL_END
add x3, x3, x28 // __pa(KERNEL_END)
add x3, x3, x28 // __pa(KERNEL_END)
add x3, x3, #BLOCK_SIZE
sub x3, x3, #1
bic x3, x3, #(BLOCK_SIZE - 1) // start PA calc.
ldr x5, =KERNEL_END // get start VA
add x5, x5, #BLOCK_SIZE
sub x5, x5, #1
bic x5, x5, #(BLOCK_SIZE - 1) // start VA calc.
mov x6, #MAP_EARLY_ALLOC_SIZE
sub x3, x3, #1
bic x3, x3, #(BLOCK_SIZE - 1) // start PA calc.
ldr x5, =EARLY_ALLOC_VADDR // get start VA
mov x6, #1
lsl x6, x6, #(PAGE_SHIFT + MAP_EARLY_ALLOC_SHIFT)
add x6, x5, x6 // end VA calc
mov x23, x6 // save end VA
sub x6, x6, #1 // inclusive range
create_block_map x0, x7, x3, x5, x6
@ -423,13 +425,11 @@ __create_page_tables:
* Map the boot_param area
*/
adr x3, ihk_param_param_addr
ldr x3, [x3] // get boot_param PA
mov x5, x23 // get start VA
add x5, x5, #BLOCK_SIZE
sub x5, x5, #1
bic x5, x5, #(BLOCK_SIZE - 1) // start VA calc
mov x6, #MAP_BOOT_PARAM_SIZE
add x6, x5, x6 // end VA calc.
ldr x3, [x3] // get boot_param PA
ldr x5, =BOOT_PARAM_VADDR // get boot_param VA
mov x6, #1
lsl x6, x6, #MAP_BOOT_PARAM_SHIFT
add x6, x5, x6 // end VA calc
sub x6, x6, #1 // inclusive range
create_block_map x0, x7, x3, x5, x6

View File

@ -7,7 +7,6 @@
#include <hw_breakpoint.h>
#include <arch-memory.h>
#include <signal.h>
#include <process.h>
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::core_num_[brps|wrps] */
/* Number of BRP/WRP registers on this CPU. */

View File

@ -1,131 +0,0 @@
/* imp-sysreg.c COPYRIGHT FUJITSU LIMITED 2018 */
#include <sysreg.h>
/* hpc */
/* Generate xos_access_<name>() accessors for the HPC tag-address and
 * hardware-prefetch implementation-defined registers. */
ACCESS_REG_FUNC(fj_tag_address_ctrl_el1, IMP_FJ_TAG_ADDRESS_CTRL_EL1);
ACCESS_REG_FUNC(pf_ctrl_el1, IMP_PF_CTRL_EL1);
ACCESS_REG_FUNC(pf_stream_detect_ctrl_el0, IMP_PF_STREAM_DETECT_CTRL_EL0);
ACCESS_REG_FUNC(pf_injection_ctrl0_el0, IMP_PF_INJECTION_CTRL0_EL0);
ACCESS_REG_FUNC(pf_injection_ctrl1_el0, IMP_PF_INJECTION_CTRL1_EL0);
ACCESS_REG_FUNC(pf_injection_ctrl2_el0, IMP_PF_INJECTION_CTRL2_EL0);
ACCESS_REG_FUNC(pf_injection_ctrl3_el0, IMP_PF_INJECTION_CTRL3_EL0);
ACCESS_REG_FUNC(pf_injection_ctrl4_el0, IMP_PF_INJECTION_CTRL4_EL0);
ACCESS_REG_FUNC(pf_injection_ctrl5_el0, IMP_PF_INJECTION_CTRL5_EL0);
ACCESS_REG_FUNC(pf_injection_ctrl6_el0, IMP_PF_INJECTION_CTRL6_EL0);
ACCESS_REG_FUNC(pf_injection_ctrl7_el0, IMP_PF_INJECTION_CTRL7_EL0);
ACCESS_REG_FUNC(pf_injection_distance0_el0, IMP_PF_INJECTION_DISTANCE0_EL0);
ACCESS_REG_FUNC(pf_injection_distance1_el0, IMP_PF_INJECTION_DISTANCE1_EL0);
ACCESS_REG_FUNC(pf_injection_distance2_el0, IMP_PF_INJECTION_DISTANCE2_EL0);
ACCESS_REG_FUNC(pf_injection_distance3_el0, IMP_PF_INJECTION_DISTANCE3_EL0);
ACCESS_REG_FUNC(pf_injection_distance4_el0, IMP_PF_INJECTION_DISTANCE4_EL0);
ACCESS_REG_FUNC(pf_injection_distance5_el0, IMP_PF_INJECTION_DISTANCE5_EL0);
ACCESS_REG_FUNC(pf_injection_distance6_el0, IMP_PF_INJECTION_DISTANCE6_EL0);
ACCESS_REG_FUNC(pf_injection_distance7_el0, IMP_PF_INJECTION_DISTANCE7_EL0);
/*
 * Put the hardware-prefetch registers into a known state: enable EL1/EL0
 * access in PF_CTRL_EL1, then zero the stream-detect, injection control
 * and injection distance registers.
 */
static void hpc_prefetch_regs_init(void)
{
	uint64_t reg = 0;

	/* PF_CTRL_EL1: allow EL1 and EL0 access to the prefetch registers */
	reg = IMP_PF_CTRL_EL1_EL1AE_ENABLE | IMP_PF_CTRL_EL1_EL0AE_ENABLE;
	xos_access_pf_ctrl_el1(WRITE_ACCESS, &reg);

	/* PF_STREAM_DETECT_CTRL: cleared */
	reg = 0;
	xos_access_pf_stream_detect_ctrl_el0(WRITE_ACCESS, &reg);

	/* PF_INJECTION_CTRL 0-7: cleared */
	reg = 0;
	xos_access_pf_injection_ctrl0_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl1_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl2_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl3_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl4_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl5_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl6_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl7_el0(WRITE_ACCESS, &reg);

	/* PF_INJECTION_DISTANCE 0-7: cleared */
	reg = 0;
	xos_access_pf_injection_distance0_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance1_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance2_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance3_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance4_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance5_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance6_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance7_el0(WRITE_ACCESS, &reg);
}
/*
 * Initialize FJ_TAG_ADDRESS_CTRL_EL1 with the TBO0/SEC0/PFE0 mask bits
 * set for sector 0.
 */
static void hpc_tag_address_regs_init(void)
{
	uint64_t ctrl;

	ctrl = IMP_FJ_TAG_ADDRESS_CTRL_EL1_TBO0_MASK;
	ctrl |= IMP_FJ_TAG_ADDRESS_CTRL_EL1_SEC0_MASK;
	ctrl |= IMP_FJ_TAG_ADDRESS_CTRL_EL1_PFE0_MASK;

	/* FJ_TAG_ADDRESS_CTRL */
	xos_access_fj_tag_address_ctrl_el1(WRITE_ACCESS, &ctrl);
}
/* Program all HPC implementation-defined registers: prefetch control
 * first, then the tag-address control register. */
void hpc_registers_init(void)
{
	hpc_prefetch_regs_init();
	hpc_tag_address_regs_init();
}
/* vhbm */
/* Generate xos_access_<name>() accessors for the hardware-barrier
 * (VHBM) implementation-defined registers. */
ACCESS_REG_FUNC(barrier_ctrl_el1, IMP_BARRIER_CTRL_EL1);
ACCESS_REG_FUNC(barrier_bst_bit_el1, IMP_BARRIER_BST_BIT_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb0_el1, IMP_BARRIER_INIT_SYNC_BB0_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb1_el1, IMP_BARRIER_INIT_SYNC_BB1_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb2_el1, IMP_BARRIER_INIT_SYNC_BB2_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb3_el1, IMP_BARRIER_INIT_SYNC_BB3_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb4_el1, IMP_BARRIER_INIT_SYNC_BB4_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb5_el1, IMP_BARRIER_INIT_SYNC_BB5_EL1);
ACCESS_REG_FUNC(barrier_assign_sync_w0_el1, IMP_BARRIER_ASSIGN_SYNC_W0_EL1);
ACCESS_REG_FUNC(barrier_assign_sync_w1_el1, IMP_BARRIER_ASSIGN_SYNC_W1_EL1);
ACCESS_REG_FUNC(barrier_assign_sync_w2_el1, IMP_BARRIER_ASSIGN_SYNC_W2_EL1);
ACCESS_REG_FUNC(barrier_assign_sync_w3_el1, IMP_BARRIER_ASSIGN_SYNC_W3_EL1);
/*
 * Initialize the hardware-barrier registers: enable EL1/EL0 access in
 * IMP_BARRIER_CTRL_EL1, then clear all six INIT_SYNC_BB and all four
 * ASSIGN_SYNC_W registers.  The write order matches the original code.
 */
void vhbm_barrier_registers_init(void)
{
	uint64_t ctrl = IMP_BARRIER_CTRL_EL1_EL1AE_ENABLE |
			IMP_BARRIER_CTRL_EL1_EL0AE_ENABLE;
	uint64_t zero = 0;

	xos_access_barrier_ctrl_el1(WRITE_ACCESS, &ctrl);

	/* Zero every barrier-blade init/sync and window-assign register. */
	xos_access_barrier_init_sync_bb0_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_init_sync_bb1_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_init_sync_bb2_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_init_sync_bb3_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_init_sync_bb4_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_init_sync_bb5_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_assign_sync_w0_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_assign_sync_w1_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_assign_sync_w2_el1(WRITE_ACCESS, &zero);
	xos_access_barrier_assign_sync_w3_el1(WRITE_ACCESS, &zero);
}
/*
 * sccr: accessor definitions for the IMP_SCCR_* sector-cache control
 * system registers, consumed by scdrv_registers_init() below.
 * ACCESS_REG_FUNC(name, reg) presumably expands to the
 * xos_access_<name>() helpers — the macro is defined elsewhere in this
 * file; confirm its expansion there.
 */
ACCESS_REG_FUNC(sccr_ctrl_el1, IMP_SCCR_CTRL_EL1);
ACCESS_REG_FUNC(sccr_assign_el1, IMP_SCCR_ASSIGN_EL1);
ACCESS_REG_FUNC(sccr_set0_l2_el1, IMP_SCCR_SET0_L2_EL1);
ACCESS_REG_FUNC(sccr_l1_el0, IMP_SCCR_L1_EL0);
/*
 * Initialize the sector-cache (SCCR) registers: set the EL1AE bit in
 * IMP_SCCR_CTRL_EL1, clear the assign and L1 registers, and program
 * IMP_SCCR_SET0_L2_EL1 with 14 in its L2_SEC0 field.  The value 14 is
 * carried over unchanged — NOTE(review): confirm its meaning against
 * the hardware spec.  Write order matches the original code.
 */
void scdrv_registers_init(void)
{
	uint64_t ctrl = IMP_SCCR_CTRL_EL1_EL1AE_MASK;
	uint64_t zero = 0;
	uint64_t l2_set0 = 14UL << IMP_SCCR_SET0_L2_EL1_L2_SEC0_SHIFT;

	xos_access_sccr_ctrl_el1(WRITE_ACCESS, &ctrl);
	xos_access_sccr_assign_el1(WRITE_ACCESS, &zero);
	xos_access_sccr_l1_el0(WRITE_ACCESS, &zero);
	xos_access_sccr_set0_l2_el1(WRITE_ACCESS, &l2_set0);
}

View File

@ -1,4 +1,4 @@
/* arch-futex.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* arch-futex.h COPYRIGHT FUJITSU LIMITED 2015 */
#ifndef __HEADER_ARM64_COMMON_ARCH_FUTEX_H
#define __HEADER_ARM64_COMMON_ARCH_FUTEX_H
@ -32,13 +32,12 @@
* @ref.impl
* linux-linaro/arch/arm64/include/asm/futex.h:futex_atomic_op_inuser
*/
static inline int futex_atomic_op_inuser(int encoded_op,
int __user *uaddr)
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op & 0x00fff000) >> 12;
int cmparg = encoded_op & 0xfff;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))

View File

@ -1,4 +1,4 @@
/* arch-lock.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* arch-lock.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
#ifndef __HEADER_ARM64_COMMON_ARCH_LOCK_H
#define __HEADER_ARM64_COMMON_ARCH_LOCK_H
@ -8,7 +8,6 @@
#include <ihk/atomic.h>
#include "affinity.h"
#include <lwk/compiler.h>
#include "config.h"
//#define DEBUG_SPINLOCK
//#define DEBUG_MCS_RWLOCK
@ -22,14 +21,14 @@ int __kprintf(const char *format, ...);
/* @ref.impl arch/arm64/include/asm/spinlock_types.h::arch_spinlock_t */
typedef struct {
#ifdef __AARCH64EB__
uint16_t next;
uint16_t owner;
#else /* __AARCH64EB__ */
//#ifdef __AARCH64EB__
// uint16_t next;
// uint16_t owner;
//#else /* __AARCH64EB__ */
uint16_t owner;
uint16_t next;
#endif /* __AARCH64EB__ */
} __attribute__((aligned(4))) ihk_spinlock_t;
//#endif /* __AARCH64EB__ */
} ihk_spinlock_t;
extern void preempt_enable(void);
extern void preempt_disable(void);
@ -37,100 +36,14 @@ extern void preempt_disable(void);
/* @ref.impl arch/arm64/include/asm/spinlock_types.h::__ARCH_SPIN_LOCK_UNLOCKED */
#define SPIN_LOCK_UNLOCKED { 0, 0 }
/* @ref.impl arch/arm64/include/asm/barrier.h::__nops */
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
/* @ref.impl ./arch/arm64/include/asm/lse.h::ARM64_LSE_ATOMIC_INSN */
/* else defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
/* initialized spinlock struct */
static void ihk_mc_spinlock_init(ihk_spinlock_t *lock)
{
*lock = (ihk_spinlock_t)SPIN_LOCK_UNLOCKED;
}
#ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_trylock_noirq(l) { \
int rc; \
__kprintf("[%d] call ihk_mc_spinlock_trylock_noirq %p %s:%d\n", \
ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
rc = __ihk_mc_spinlock_trylock_noirq(l); \
__kprintf("[%d] ret ihk_mc_spinlock_trylock_noirq\n", \
ihk_mc_get_processor_id()); \
rc; \
}
#else
#define ihk_mc_spinlock_trylock_noirq __ihk_mc_spinlock_trylock_noirq
#endif
/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_trylock */
/* spinlock trylock */
static int __ihk_mc_spinlock_trylock_noirq(ihk_spinlock_t *lock)
{
unsigned int tmp;
ihk_spinlock_t lockval;
int success;
preempt_disable();
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" prfm pstl1strm, %2\n"
"1: ldaxr %w0, %2\n"
" eor %w1, %w0, %w0, ror #16\n"
" cbnz %w1, 2f\n"
" add %w0, %w0, %3\n"
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b\n"
"2:",
/* LSE atomics */
" ldr %w0, %2\n"
" eor %w1, %w0, %w0, ror #16\n"
" cbnz %w1, 1f\n"
" add %w1, %w0, %3\n"
" casa %w0, %w1, %2\n"
" sub %w1, %w1, %3\n"
" eor %w1, %w1, %w0\n"
"1:")
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
: "I" (1 << TICKET_SHIFT)
: "memory");
success = !tmp;
if (!success) {
preempt_enable();
}
return success;
}
#ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_trylock(l, result) ({ \
unsigned long rc; \
__kprintf("[%d] call ihk_mc_spinlock_trylock %p %s:%d\n", \
ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
rc = __ihk_mc_spinlock_trylock(l, result); \
__kprintf("[%d] ret ihk_mc_spinlock_trylock\n", \
ihk_mc_get_processor_id()); \
rc; \
})
#else
#define ihk_mc_spinlock_trylock __ihk_mc_spinlock_trylock
#endif
/* spinlock trylock & interrupt disable & PSTATE.DAIF save */
static unsigned long __ihk_mc_spinlock_trylock(ihk_spinlock_t *lock,
int *result)
{
unsigned long flags;
flags = cpu_disable_interrupt_save();
*result = __ihk_mc_spinlock_trylock_noirq(lock);
return flags;
}
/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_lock */
/* spinlock lock */
#ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_lock_noirq(l) { \
__kprintf("[%d] call ihk_mc_spinlock_lock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
@ -141,8 +54,6 @@ __kprintf("[%d] ret ihk_mc_spinlock_lock_noirq\n", ihk_mc_get_processor_id()); \
#define ihk_mc_spinlock_lock_noirq __ihk_mc_spinlock_lock_noirq
#endif
/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_lock */
/* spinlock lock */
static void __ihk_mc_spinlock_lock_noirq(ihk_spinlock_t *lock)
{
unsigned int tmp;
@ -152,19 +63,11 @@ static void __ihk_mc_spinlock_lock_noirq(ihk_spinlock_t *lock)
asm volatile(
/* Atomically increment the next ticket. */
ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" prfm pstl1strm, %3\n"
"1: ldaxr %w0, %3\n"
" add %w1, %w0, %w5\n"
" stxr %w2, %w1, %3\n"
" cbnz %w2, 1b\n",
/* LSE atomics */
" mov %w2, %w5\n"
" ldadda %w2, %w0, %3\n"
__nops(3)
)
" cbnz %w2, 1b\n"
/* Did we get the lock? */
" eor %w1, %w0, %w0, ror #16\n"
" cbz %w1, 3f\n"
@ -184,6 +87,7 @@ static void __ihk_mc_spinlock_lock_noirq(ihk_spinlock_t *lock)
: "memory");
}
/* spinlock lock & interrupt disable & PSTATE.DAIF save */
#ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_lock(l) ({ unsigned long rc;\
__kprintf("[%d] call ihk_mc_spinlock_lock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
@ -193,8 +97,6 @@ __kprintf("[%d] ret ihk_mc_spinlock_lock\n", ihk_mc_get_processor_id()); rc;\
#else
#define ihk_mc_spinlock_lock __ihk_mc_spinlock_lock
#endif
/* spinlock lock & interrupt disable & PSTATE.DAIF save */
static unsigned long __ihk_mc_spinlock_lock(ihk_spinlock_t *lock)
{
unsigned long flags;
@ -206,6 +108,8 @@ static unsigned long __ihk_mc_spinlock_lock(ihk_spinlock_t *lock)
return flags;
}
/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_unlock */
/* spinlock unlock */
#ifdef DEBUG_SPINLOCK
#define ihk_mc_spinlock_unlock_noirq(l) { \
__kprintf("[%d] call ihk_mc_spinlock_unlock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
@ -215,24 +119,12 @@ __kprintf("[%d] ret ihk_mc_spinlock_unlock_noirq\n", ihk_mc_get_processor_id());
#else
#define ihk_mc_spinlock_unlock_noirq __ihk_mc_spinlock_unlock_noirq
#endif
/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_unlock */
/* spinlock unlock */
static void __ihk_mc_spinlock_unlock_noirq(ihk_spinlock_t *lock)
{
unsigned long tmp;
asm volatile(ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
" ldrh %w1, %0\n"
" add %w1, %w1, #1\n"
" stlrh %w1, %0",
/* LSE atomics */
" mov %w1, #1\n"
" staddlh %w1, %0\n"
__nops(1))
: "=Q" (lock->owner), "=&r" (tmp)
:
asm volatile(
" stlrh %w1, %0\n"
: "=Q" (lock->owner)
: "r" (lock->owner + 1)
: "memory");
preempt_enable();
@ -260,11 +152,7 @@ typedef struct mcs_lock_node {
unsigned long locked;
struct mcs_lock_node *next;
unsigned long irqsave;
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_lock_node_t;
#else
} mcs_lock_node_t;
#endif
} __attribute__((aligned(64))) mcs_lock_node_t;
typedef mcs_lock_node_t mcs_lock_t;
@ -354,22 +242,14 @@ typedef struct mcs_rwlock_node {
char dmy1; // unused
char dmy2; // unused
struct mcs_rwlock_node *next;
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_rwlock_node_t;
#else
} mcs_rwlock_node_t;
#endif
} __attribute__((aligned(64))) mcs_rwlock_node_t;
typedef struct mcs_rwlock_node_irqsave {
#ifndef SPINLOCK_IN_MCS_RWLOCK
struct mcs_rwlock_node node;
#endif
unsigned long irqsave;
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_rwlock_node_irqsave_t;
#else
} mcs_rwlock_node_irqsave_t;
#endif
} __attribute__((aligned(64))) mcs_rwlock_node_irqsave_t;
typedef struct mcs_rwlock_lock {
#ifdef SPINLOCK_IN_MCS_RWLOCK
@ -378,11 +258,7 @@ typedef struct mcs_rwlock_lock {
struct mcs_rwlock_node reader; /* common reader lock */
struct mcs_rwlock_node *node; /* base */
#endif
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_rwlock_lock_t;
#else
} mcs_rwlock_lock_t;
#endif
} __attribute__((aligned(64))) mcs_rwlock_lock_t;
static void
mcs_rwlock_init(struct mcs_rwlock_lock *lock)
@ -730,18 +606,16 @@ __mcs_rwlock_reader_unlock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_
#endif
}
#if defined(CONFIG_HAS_NMI)
#include <arm-gic-v3.h>
static inline int irqflags_can_interrupt(unsigned long flags)
{
return (flags == ICC_PMR_EL1_UNMASKED);
#ifdef CONFIG_HAS_NMI
#warning irqflags_can_interrupt needs testing/fixing on such a target
return flags > ICC_PMR_EL1_MASKED;
#else
// PSTATE.DAIF I bit clear means interrupt is possible
return !(flags & (1 << 7));
#endif
}
#else /* CONFIG_HAS_NMI */
static inline int irqflags_can_interrupt(unsigned long flags)
{
return !(flags & 0x2);
}
#endif /* CONFIG_HAS_NMI */
#endif /* !__HEADER_ARM64_COMMON_ARCH_LOCK_H */

View File

@ -1,110 +1,96 @@
/* arch-memory.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* arch-memory.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
#ifndef __HEADER_ARM64_COMMON_ARCH_MEMORY_H
#define __HEADER_ARM64_COMMON_ARCH_MEMORY_H
#include <const.h>
#include <errno.h>
#ifndef __ASSEMBLY__
#include <list.h>
#include <page.h>
void panic(const char *);
#endif /*__ASSEMBLY__*/
#define _SZ4KB (1UL<<12)
#define _SZ16KB (1UL<<14)
#define _SZ64KB (1UL<<16)
#ifdef CONFIG_ARM64_64K_PAGES
# define GRANULE_SIZE _SZ64KB
# define BLOCK_SHIFT PAGE_SHIFT
# define BLOCK_SIZE PAGE_SIZE
# define TABLE_SHIFT PMD_SHIFT
# define GRANULE_SIZE _SZ64KB
#else
# define GRANULE_SIZE _SZ4KB
# define BLOCK_SHIFT SECTION_SHIFT
# define BLOCK_SIZE SECTION_SIZE
# define TABLE_SHIFT PUD_SHIFT
# define GRANULE_SIZE _SZ4KB
#endif
#define VA_BITS CONFIG_ARM64_VA_BITS
/*
* Address define
*/
/* early alloc area address */
/* START:_end, SIZE:512 pages */
#define MAP_KERNEL_SHIFT 21
#define MAP_KERNEL_SIZE (UL(1) << MAP_KERNEL_SHIFT)
#define MAP_EARLY_ALLOC_SHIFT 9
#define MAP_EARLY_ALLOC_SIZE (UL(1) << (PAGE_SHIFT + MAP_EARLY_ALLOC_SHIFT))
#ifndef __ASSEMBLY__
# define ALIGN_UP(x, align) ALIGN_DOWN((x) + (align) - 1, align)
# define ALIGN_DOWN(x, align) ((x) & ~((align) - 1))
extern char _end[];
# define MAP_EARLY_ALLOC (ALIGN_UP((unsigned long)_end, BLOCK_SIZE))
# define MAP_EARLY_ALLOC_END (MAP_EARLY_ALLOC + MAP_EARLY_ALLOC_SIZE)
#endif /* !__ASSEMBLY__ */
/* bootparam area address */
/* START:early alloc area end, SIZE:2MiB */
#define MAP_BOOT_PARAM_SHIFT 21
#define MAP_BOOT_PARAM_SIZE (UL(1) << MAP_BOOT_PARAM_SHIFT)
#ifndef __ASSEMBLY__
# define MAP_BOOT_PARAM (ALIGN_UP(MAP_EARLY_ALLOC_END, BLOCK_SIZE))
# define MAP_BOOT_PARAM_END (MAP_BOOT_PARAM + MAP_BOOT_PARAM_SIZE)
#endif /* !__ASSEMBLY__ */
#if (VA_BITS == 39 && GRANULE_SIZE == _SZ4KB)
#
# define LD_TASK_UNMAPPED_BASE UL(0x0000000400000000)
# define TASK_UNMAPPED_BASE UL(0x0000000800000000)
# define USER_END UL(0x0000002000000000)
# define MAP_VMAP_START UL(0xffffffbdc0000000)
# define MAP_VMAP_SIZE UL(0x0000000100000000)
# define MAP_FIXED_START UL(0xffffffbffbdfd000)
# define MAP_ST_START UL(0xffffffc000000000)
# define MAP_KERNEL_START UL(0xffffffffff800000)
# define MAP_KERNEL_START UL(0xffffffffff800000) // 0xffff_ffff_ff80_0000
# define MAP_ST_SIZE (MAP_KERNEL_START - MAP_ST_START) // 0x0000_003f_ff80_0000
# define MAP_EARLY_ALLOC (MAP_KERNEL_START + MAP_KERNEL_SIZE) // 0xffff_ffff_ffa0_0000
# define MAP_EARLY_ALLOC_END (MAP_EARLY_ALLOC + MAP_EARLY_ALLOC_SIZE)
# define MAP_BOOT_PARAM (MAP_EARLY_ALLOC_END) // 0xffff_ffff_ffc0_0000
# define MAP_BOOT_PARAM_END (MAP_BOOT_PARAM + MAP_BOOT_PARAM_SIZE) // 0xffff_ffff_ffe0_0000
#
#elif (VA_BITS == 42 && GRANULE_SIZE == _SZ64KB)
#
# define LD_TASK_UNMAPPED_BASE UL(0x0000002000000000)
# define TASK_UNMAPPED_BASE UL(0x0000004000000000)
# define USER_END UL(0x0000010000000000)
# define MAP_VMAP_START UL(0xfffffdfee0000000)
# define MAP_VMAP_SIZE UL(0x0000000100000000)
# define MAP_FIXED_START UL(0xfffffdfffbdd0000)
# define MAP_ST_START UL(0xfffffe0000000000)
# define MAP_KERNEL_START UL(0xffffffffe0000000)
# define MAP_KERNEL_START UL(0xffffffffe0000000) // 0xffff_ffff_e000_0000
# define MAP_ST_SIZE (MAP_KERNEL_START - MAP_ST_START) // 0x0000_01ff_e000_0000
# define MAP_EARLY_ALLOC (MAP_KERNEL_START + MAP_KERNEL_SIZE) // 0xffff_ffff_e020_0000
# define MAP_EARLY_ALLOC_END (MAP_EARLY_ALLOC + MAP_EARLY_ALLOC_SIZE)
# define MAP_BOOT_PARAM (MAP_EARLY_ALLOC_END) // 0xffff_ffff_e220_0000
# define MAP_BOOT_PARAM_END (MAP_BOOT_PARAM + MAP_BOOT_PARAM_SIZE) // 0xffff_ffff_e240_0000
#
#elif (VA_BITS == 48 && GRANULE_SIZE == _SZ4KB)
#
# define LD_TASK_UNMAPPED_BASE UL(0x0000080000000000)
# define TASK_UNMAPPED_BASE UL(0x0000100000000000)
# define USER_END UL(0x0000400000000000)
# define MAP_VMAP_START UL(0xffff7bffc0000000)
# define MAP_VMAP_SIZE UL(0x0000000100000000)
# define MAP_FIXED_START UL(0xffff7ffffbdfd000)
# define MAP_ST_START UL(0xffff800000000000)
# define MAP_KERNEL_START UL(0xffffffffff800000)
# define MAP_KERNEL_START UL(0xffffffffff800000) // 0xffff_ffff_ff80_0000
# define MAP_ST_SIZE (MAP_KERNEL_START - MAP_ST_START) // 0x0000_7fff_ff80_0000
# define MAP_EARLY_ALLOC (MAP_KERNEL_START + MAP_KERNEL_SIZE) // 0xffff_ffff_ffa0_0000
# define MAP_EARLY_ALLOC_END (MAP_EARLY_ALLOC + MAP_EARLY_ALLOC_SIZE)
# define MAP_BOOT_PARAM (MAP_EARLY_ALLOC_END) // 0xffff_ffff_ffc0_0000
# define MAP_BOOT_PARAM_END (MAP_BOOT_PARAM + MAP_BOOT_PARAM_SIZE) // 0xffff_ffff_ffe0_0000
#
#
#elif (VA_BITS == 48 && GRANULE_SIZE == _SZ64KB)
#
# define LD_TASK_UNMAPPED_BASE UL(0x0000080000000000)
# define TASK_UNMAPPED_BASE UL(0x0000100000000000)
# define USER_END UL(0x0000400000000000)
# define MAP_VMAP_START UL(0xffff780000000000)
# define MAP_VMAP_SIZE UL(0x0000000100000000)
# define MAP_FIXED_START UL(0xffff7ffffbdd0000)
# define MAP_ST_START UL(0xffff800000000000)
# define MAP_KERNEL_START UL(0xffffffffe0000000)
# define MAP_KERNEL_START UL(0xffffffffe0000000) // 0xffff_ffff_e000_0000
# define MAP_ST_SIZE (MAP_KERNEL_START - MAP_ST_START) // 0x0000_7fff_e000_0000
# define MAP_EARLY_ALLOC (MAP_KERNEL_START + MAP_KERNEL_SIZE) // 0xffff_ffff_e020_0000
# define MAP_EARLY_ALLOC_END (MAP_EARLY_ALLOC + MAP_EARLY_ALLOC_SIZE)
# define MAP_BOOT_PARAM (MAP_EARLY_ALLOC_END) // 0xffff_ffff_e220_0000
# define MAP_BOOT_PARAM_END (MAP_BOOT_PARAM + MAP_BOOT_PARAM_SIZE) // 0xffff_ffff_e240_0000
#
#else
# error address space is not defined.
#endif
#define MAP_ST_SIZE (MAP_KERNEL_START - MAP_ST_START)
#define STACK_TOP(region) ((region)->user_end)
#define STACK_TOP(region) ((region)->user_end)
/*
* pagetable define
@ -118,10 +104,7 @@ extern char _end[];
# define PTL3_INDEX_MASK PTL4_INDEX_MASK
# define PTL2_INDEX_MASK PTL3_INDEX_MASK
# define PTL1_INDEX_MASK PTL2_INDEX_MASK
# define __PTL4_CONT_SHIFT (__PTL4_SHIFT + 0)
# define __PTL3_CONT_SHIFT (__PTL3_SHIFT + 4)
# define __PTL2_CONT_SHIFT (__PTL2_SHIFT + 4)
# define __PTL1_CONT_SHIFT (__PTL1_SHIFT + 4)
# define FIRST_LEVEL_BLOCK_SUPPORT 1
#elif GRANULE_SIZE == _SZ16KB
# define __PTL4_SHIFT 47
# define __PTL3_SHIFT 36
@ -131,12 +114,9 @@ extern char _end[];
# define PTL3_INDEX_MASK ((UL(1) << 11) - 1)
# define PTL2_INDEX_MASK PTL3_INDEX_MASK
# define PTL1_INDEX_MASK PTL2_INDEX_MASK
# define __PTL4_CONT_SHIFT (__PTL4_SHIFT + 0)
# define __PTL3_CONT_SHIFT (__PTL3_SHIFT + 0)
# define __PTL2_CONT_SHIFT (__PTL2_SHIFT + 5)
# define __PTL1_CONT_SHIFT (__PTL1_SHIFT + 7)
# define FIRST_LEVEL_BLOCK_SUPPORT 0
#elif GRANULE_SIZE == _SZ64KB
# define __PTL4_SHIFT 55
# define __PTL4_SHIFT 0
# define __PTL3_SHIFT 42
# define __PTL2_SHIFT 29
# define __PTL1_SHIFT 16
@ -144,39 +124,19 @@ extern char _end[];
# define PTL3_INDEX_MASK ((UL(1) << 6) - 1)
# define PTL2_INDEX_MASK ((UL(1) << 13) - 1)
# define PTL1_INDEX_MASK PTL2_INDEX_MASK
# define __PTL4_CONT_SHIFT (__PTL4_SHIFT + 0)
# define __PTL3_CONT_SHIFT (__PTL3_SHIFT + 0)
# define __PTL2_CONT_SHIFT (__PTL2_SHIFT + 5)
# define __PTL1_CONT_SHIFT (__PTL1_SHIFT + 5)
# define FIRST_LEVEL_BLOCK_SUPPORT 0
#else
# error granule size error.
#endif
#ifndef __ASSEMBLY__
extern int first_level_block_support;
#endif /* __ASSEMBLY__ */
# define __PTL4_SIZE (UL(1) << __PTL4_SHIFT)
# define __PTL3_SIZE (UL(1) << __PTL3_SHIFT)
# define __PTL2_SIZE (UL(1) << __PTL2_SHIFT)
# define __PTL1_SIZE (UL(1) << __PTL1_SHIFT)
# define __PTL4_MASK (~(__PTL4_SIZE - 1))
# define __PTL3_MASK (~(__PTL3_SIZE - 1))
# define __PTL2_MASK (~(__PTL2_SIZE - 1))
# define __PTL1_MASK (~(__PTL1_SIZE - 1))
# define __PTL4_CONT_SIZE (UL(1) << __PTL4_CONT_SHIFT)
# define __PTL3_CONT_SIZE (UL(1) << __PTL3_CONT_SHIFT)
# define __PTL2_CONT_SIZE (UL(1) << __PTL2_CONT_SHIFT)
# define __PTL1_CONT_SIZE (UL(1) << __PTL1_CONT_SHIFT)
# define __PTL4_CONT_MASK (~(__PTL4_CONT_SIZE - 1))
# define __PTL3_CONT_MASK (~(__PTL3_CONT_SIZE - 1))
# define __PTL2_CONT_MASK (~(__PTL2_CONT_SIZE - 1))
# define __PTL1_CONT_MASK (~(__PTL1_CONT_SIZE - 1))
# define __PTL4_CONT_COUNT (UL(1) << (__PTL4_CONT_SHIFT - __PTL4_SHIFT))
# define __PTL3_CONT_COUNT (UL(1) << (__PTL3_CONT_SHIFT - __PTL3_SHIFT))
# define __PTL2_CONT_COUNT (UL(1) << (__PTL2_CONT_SHIFT - __PTL2_SHIFT))
# define __PTL1_CONT_COUNT (UL(1) << (__PTL1_CONT_SHIFT - __PTL1_SHIFT))
# define __PTL4_MASK (~__PTL4_SIZE - 1)
# define __PTL3_MASK (~__PTL3_SIZE - 1)
# define __PTL2_MASK (~__PTL2_SIZE - 1)
# define __PTL1_MASK (~__PTL1_SIZE - 1)
/* calculate entries */
#if (CONFIG_ARM64_PGTABLE_LEVELS > 3) && (VA_BITS > __PTL4_SHIFT)
@ -223,22 +183,6 @@ static const unsigned int PTL4_ENTRIES = __PTL4_ENTRIES;
static const unsigned int PTL3_ENTRIES = __PTL3_ENTRIES;
static const unsigned int PTL2_ENTRIES = __PTL2_ENTRIES;
static const unsigned int PTL1_ENTRIES = __PTL1_ENTRIES;
static const unsigned int PTL4_CONT_SHIFT = __PTL4_CONT_SHIFT;
static const unsigned int PTL3_CONT_SHIFT = __PTL3_CONT_SHIFT;
static const unsigned int PTL2_CONT_SHIFT = __PTL2_CONT_SHIFT;
static const unsigned int PTL1_CONT_SHIFT = __PTL1_CONT_SHIFT;
static const unsigned long PTL4_CONT_SIZE = __PTL4_CONT_SIZE;
static const unsigned long PTL3_CONT_SIZE = __PTL3_CONT_SIZE;
static const unsigned long PTL2_CONT_SIZE = __PTL2_CONT_SIZE;
static const unsigned long PTL1_CONT_SIZE = __PTL1_CONT_SIZE;
static const unsigned long PTL4_CONT_MASK = __PTL4_CONT_MASK;
static const unsigned long PTL3_CONT_MASK = __PTL3_CONT_MASK;
static const unsigned long PTL2_CONT_MASK = __PTL2_CONT_MASK;
static const unsigned long PTL1_CONT_MASK = __PTL1_CONT_MASK;
static const unsigned int PTL4_CONT_COUNT = __PTL4_CONT_COUNT;
static const unsigned int PTL3_CONT_COUNT = __PTL3_CONT_COUNT;
static const unsigned int PTL2_CONT_COUNT = __PTL2_CONT_COUNT;
static const unsigned int PTL1_CONT_COUNT = __PTL1_CONT_COUNT;
#else
# define PTL4_SHIFT __PTL4_SHIFT
# define PTL3_SHIFT __PTL3_SHIFT
@ -256,26 +200,8 @@ static const unsigned int PTL1_CONT_COUNT = __PTL1_CONT_COUNT;
# define PTL3_ENTRIES __PTL3_ENTRIES
# define PTL2_ENTRIES __PTL2_ENTRIES
# define PTL1_ENTRIES __PTL1_ENTRIES
# define PTL4_CONT_SHIFT __PTL4_CONT_SHIFT
# define PTL3_CONT_SHIFT __PTL3_CONT_SHIFT
# define PTL2_CONT_SHIFT __PTL2_CONT_SHIFT
# define PTL1_CONT_SHIFT __PTL1_CONT_SHIFT
# define PTL4_CONT_SIZE __PTL4_CONT_SIZE
# define PTL3_CONT_SIZE __PTL3_CONT_SIZE
# define PTL2_CONT_SIZE __PTL2_CONT_SIZE
# define PTL1_CONT_SIZE __PTL1_CONT_SIZE
# define PTL4_CONT_MASK __PTL4_CONT_MASK
# define PTL3_CONT_MASK __PTL3_CONT_MASK
# define PTL2_CONT_MASK __PTL2_CONT_MASK
# define PTL1_CONT_MASK __PTL1_CONT_MASK
# define PTL4_CONT_COUNT __PTL4_CONT_COUNT
# define PTL3_CONT_COUNT __PTL3_CONT_COUNT
# define PTL2_CONT_COUNT __PTL2_CONT_COUNT
# define PTL1_CONT_COUNT __PTL1_CONT_COUNT
#endif/*__ASSEMBLY__*/
#define __page_size(pgshift) (UL(1) << (pgshift))
#define __page_mask(pgsize) (~((pgsize) - 1))
#define __page_offset(addr, size) ((unsigned long)(addr) & ((size) - 1))
#define __page_align(addr, size) ((unsigned long)(addr) & ~((size) - 1))
#define __page_align_up(addr, size) __page_align((unsigned long)(addr) + (size) - 1, size)
@ -284,8 +210,8 @@ static const unsigned int PTL1_CONT_COUNT = __PTL1_CONT_COUNT;
 * normal page
*/
#define PAGE_SHIFT __PTL1_SHIFT
#define PAGE_SIZE __page_size(PAGE_SHIFT)
#define PAGE_MASK __page_mask(PAGE_SIZE)
#define PAGE_SIZE (UL(1) << __PTL1_SHIFT)
#define PAGE_MASK (~(PTL1_SIZE - 1))
#define PAGE_P2ALIGN 0
#define page_offset(addr) __page_offset(addr, PAGE_SIZE)
#define page_align(addr) __page_align(addr, PAGE_SIZE)
@ -295,8 +221,8 @@ static const unsigned int PTL1_CONT_COUNT = __PTL1_CONT_COUNT;
* large page
*/
#define LARGE_PAGE_SHIFT __PTL2_SHIFT
#define LARGE_PAGE_SIZE __page_size(LARGE_PAGE_SHIFT)
#define LARGE_PAGE_MASK __page_mask(LARGE_PAGE_SIZE)
#define LARGE_PAGE_SIZE (UL(1) << __PTL2_SHIFT)
#define LARGE_PAGE_MASK (~(PTL2_SIZE - 1))
#define LARGE_PAGE_P2ALIGN (LARGE_PAGE_SHIFT - PAGE_SHIFT)
#define large_page_offset(addr) __page_offset(addr, LARGE_PAGE_SIZE)
#define large_page_align(addr) __page_align(addr, LARGE_PAGE_SIZE)
@ -337,18 +263,6 @@ static const unsigned int PTL1_CONT_COUNT = __PTL1_CONT_COUNT;
#define PTE_FILEOFF PTE_SPECIAL
#ifdef CONFIG_ARM64_64K_PAGES
# define USER_STACK_PREPAGE_SIZE PAGE_SIZE
# define USER_STACK_PAGE_MASK PAGE_MASK
# define USER_STACK_PAGE_P2ALIGN PAGE_P2ALIGN
# define USER_STACK_PAGE_SHIFT PAGE_SHIFT
#else
# define USER_STACK_PREPAGE_SIZE LARGE_PAGE_SIZE
# define USER_STACK_PAGE_MASK LARGE_PAGE_MASK
# define USER_STACK_PAGE_P2ALIGN LARGE_PAGE_P2ALIGN
# define USER_STACK_PAGE_SHIFT LARGE_PAGE_SHIFT
#endif
#define PT_ENTRIES (PAGE_SIZE >> 3)
#ifndef __ASSEMBLY__
@ -398,8 +312,6 @@ enum ihk_mc_pt_attribute {
PTATTR_FOR_USER = UL(1) << (PHYS_MASK_SHIFT - 1),
/* WriteCombine */
PTATTR_WRITE_COMBINED = PTE_ATTRINDX(2),
/* converted flag */
ARCH_PTATTR_FLIPPED = PTE_PROT_NONE,
};
extern enum ihk_mc_pt_attribute attr_mask;
@ -411,23 +323,18 @@ static inline int pfn_is_write_combined(uintptr_t pfn)
//共通部と意味がするビット定義
#define attr_flip_bits (PTATTR_WRITABLE | PTATTR_LARGEPAGE)
static inline int pgsize_to_tbllv(size_t pgsize);
static inline int pte_is_type_page(const pte_t *ptep, size_t pgsize)
{
int ret = 0; //default D_TABLE
int level = pgsize_to_tbllv(pgsize);
switch (level) {
case 4:
case 3:
case 2:
if ((PTL4_SIZE == pgsize && CONFIG_ARM64_PGTABLE_LEVELS > 3) ||
(PTL3_SIZE == pgsize && CONFIG_ARM64_PGTABLE_LEVELS > 2) ||
(PTL2_SIZE == pgsize)) {
// check D_BLOCK
ret = ((*ptep & PMD_TYPE_MASK) == PMD_TYPE_SECT);
break;
case 1:
}
else if (PTL1_SIZE == pgsize) {
// check D_PAGE
ret = ((*ptep & PTE_TYPE_MASK) == PTE_TYPE_PAGE);
break;
}
return ret;
}
@ -506,18 +413,21 @@ static inline enum ihk_mc_pt_attribute pte_get_attr(pte_t *ptep, size_t pgsize)
static inline void pte_make_null(pte_t *ptep, size_t pgsize)
{
*ptep = PTE_NULL;
if ((PTL4_SIZE == pgsize && CONFIG_ARM64_PGTABLE_LEVELS > 3) ||
(PTL3_SIZE == pgsize && CONFIG_ARM64_PGTABLE_LEVELS > 2) ||
(PTL2_SIZE == pgsize) ||
(PTL1_SIZE == pgsize)) {
*ptep = PTE_NULL;
}
}
static inline void pte_make_fileoff(off_t off,
enum ihk_mc_pt_attribute ptattr, size_t pgsize, pte_t *ptep)
{
if (((PTL4_SIZE == pgsize || PTL4_CONT_SIZE == pgsize)
&& CONFIG_ARM64_PGTABLE_LEVELS > 3) ||
((PTL3_SIZE == pgsize || PTL3_CONT_SIZE == pgsize)
&& CONFIG_ARM64_PGTABLE_LEVELS > 2) ||
(PTL2_SIZE == pgsize || PTL2_CONT_SIZE == pgsize) ||
(PTL1_SIZE == pgsize || PTL1_CONT_SIZE == pgsize)) {
if ((PTL4_SIZE == pgsize && CONFIG_ARM64_PGTABLE_LEVELS > 3) ||
(PTL3_SIZE == pgsize && CONFIG_ARM64_PGTABLE_LEVELS > 2) ||
(PTL2_SIZE == pgsize) ||
(PTL1_SIZE == pgsize)) {
*ptep = PTE_FILEOFF | off | PTE_TYPE_PAGE;
}
}
@ -547,260 +457,7 @@ static inline void pte_set_dirty(pte_t *ptep, size_t pgsize)
}
}
static inline int pte_is_contiguous(const pte_t *ptep)
{
return !!(*ptep & PTE_CONT);
}
static inline int pgsize_is_contiguous(size_t pgsize)
{
int ret = 0;
if ((pgsize == PTL4_CONT_SIZE && CONFIG_ARM64_PGTABLE_LEVELS > 3) ||
(pgsize == PTL3_CONT_SIZE && CONFIG_ARM64_PGTABLE_LEVELS > 2) ||
(pgsize == PTL2_CONT_SIZE) ||
(pgsize == PTL1_CONT_SIZE)) {
ret = 1;
}
return ret;
}
static inline int pgsize_to_tbllv(size_t pgsize)
{
int level = -EINVAL;
if ((pgsize == PTL4_CONT_SIZE || pgsize == PTL4_SIZE)
&& (CONFIG_ARM64_PGTABLE_LEVELS > 3)) {
level = 4;
} else if ((pgsize == PTL3_CONT_SIZE || pgsize == PTL3_SIZE)
&& (CONFIG_ARM64_PGTABLE_LEVELS > 2)) {
level = 3;
} else if (pgsize == PTL2_CONT_SIZE || pgsize == PTL2_SIZE) {
level = 2;
} else if (pgsize == PTL1_CONT_SIZE || pgsize == PTL1_SIZE) {
level = 1;
}
return level;
}
static inline size_t tbllv_to_pgsize(int level)
{
size_t pgsize = 0;
switch (level) {
case 4:
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
pgsize = PTL4_SIZE;
} else {
panic("page table level 4 is invalid.");
}
break;
case 3:
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
pgsize = PTL3_SIZE;
} else {
panic("page table level 3 is invalid.");
}
break;
case 2:
pgsize = PTL2_SIZE;
break;
case 1:
pgsize = PTL1_SIZE;
break;
default:
panic("page table level is invalid.");
}
return pgsize;
}
static inline size_t tbllv_to_contpgsize(int level)
{
size_t pgsize = 0;
switch (level) {
case 4:
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
pgsize = PTL4_CONT_SIZE;
} else {
panic("page table level 4 is invalid.");
}
break;
case 3:
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
pgsize = PTL3_CONT_SIZE;
} else {
panic("page table level 3 is invalid.");
}
break;
case 2:
pgsize = PTL2_CONT_SIZE;
break;
case 1:
pgsize = PTL1_CONT_SIZE;
break;
default:
panic("page table level is invalid.");
}
return pgsize;
}
static inline int tbllv_to_contpgshift(int level)
{
int ret = 0;
switch (level) {
case 4:
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
ret = PTL4_CONT_SHIFT;
} else {
panic("page table level 4 is invalid.");
}
break;
case 3:
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
ret = PTL3_CONT_SHIFT;
} else {
panic("page table level 3 is invalid.");
}
break;
case 2:
ret = PTL2_CONT_SHIFT;
break;
case 1:
ret = PTL1_CONT_SHIFT;
break;
default:
panic("page table level is invalid.");
}
return ret;
}
static inline pte_t *get_contiguous_head(pte_t *__ptep, size_t __pgsize)
{
unsigned long align;
int shift = 0;
switch (pgsize_to_tbllv(__pgsize)) {
case 4:
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
shift = PTL4_CONT_SHIFT - PTL4_SHIFT;
} else {
panic("page table level 4 is invalid.");
}
break;
case 3:
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
shift = PTL3_CONT_SHIFT - PTL3_SHIFT;
} else {
panic("page table level 3 is invalid.");
}
break;
case 2:
shift = PTL2_CONT_SHIFT - PTL2_SHIFT;
break;
case 1:
shift = PTL1_CONT_SHIFT - PTL1_SHIFT;
break;
default:
panic("page table level is invalid.");
}
align = sizeof(*__ptep) << shift;
return (pte_t *)__page_align(__ptep, align);
}
static inline pte_t *get_contiguous_tail(pte_t *__ptep, size_t __pgsize)
{
unsigned long align;
int shift = 0;
switch (pgsize_to_tbllv(__pgsize)) {
case 4:
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
shift = PTL4_CONT_SHIFT - PTL4_SHIFT;
} else {
panic("page table level 4 is invalid.");
}
break;
case 3:
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
shift = PTL3_CONT_SHIFT - PTL3_SHIFT;
} else {
panic("page table level 3 is invalid.");
}
break;
case 2:
shift = PTL2_CONT_SHIFT - PTL2_SHIFT;
break;
case 1:
shift = PTL1_CONT_SHIFT - PTL1_SHIFT;
break;
default:
panic("page table level is invalid.");
}
align = sizeof(*__ptep) << shift;
return (pte_t *)__page_align_up(__ptep + 1, align) - 1;
}
static inline int split_contiguous_pages(pte_t *ptep, size_t pgsize)
{
int ret;
pte_t *head = get_contiguous_head(ptep, pgsize);
pte_t *tail = get_contiguous_tail(ptep, pgsize);
pte_t *ptr;
uintptr_t phys;
struct page *page;
phys = pte_get_phys(head);
page = phys_to_page(phys);
if (page && (page_is_in_memobj(page)
|| page_is_multi_mapped(page))) {
ret = -EINVAL;
goto out;
}
for (ptr = head; ptr <= tail; ptr++) {
*ptr &= ~PTE_CONT;
}
ret = 0;
out:
return ret;
}
static inline int page_is_contiguous_head(pte_t *ptep, size_t pgsize)
{
pte_t *ptr = get_contiguous_head(ptep, pgsize);
return (ptr == ptep);
}
static inline int page_is_contiguous_tail(pte_t *ptep, size_t pgsize)
{
pte_t *ptr = get_contiguous_tail(ptep, pgsize);
return (ptr == ptep);
}
/* Return true if PTE doesn't belong to a contiguous PTE group or PTE
* is the head of a contiguous PTE group
*/
static inline int pte_is_head(pte_t *ptep, pte_t *old, size_t cont_size)
{
if (!pte_is_contiguous(old))
return 1;
return page_is_contiguous_head(ptep, cont_size);
}
struct page_table;
void arch_adjust_allocate_page_size(struct page_table *pt,
uintptr_t fault_addr,
pte_t *ptep,
void **pgaddrp,
size_t *pgsizep);
void set_pte(pte_t *ppte, unsigned long phys, enum ihk_mc_pt_attribute attr);
pte_t *get_pte(struct page_table *pt, void *virt, enum ihk_mc_pt_attribute attr);

View File

@ -1,16 +1,9 @@
/* arch-perfctr.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* arch-perfctr.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
#ifndef __ARCH_PERFCTR_H__
#define __ARCH_PERFCTR_H__
#include <ihk/types.h>
#include <ihk/cpu.h>
#include <bitops.h>
struct per_cpu_arm_pmu {
int num_events;
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
};
/* @ref.impl arch/arm64/include/asm/pmu.h */
struct arm_pmu {
@ -26,12 +19,9 @@ struct arm_pmu {
int (*disable_intens)(int);
int (*set_event_filter)(unsigned long*, int);
void (*write_evtype)(int, uint32_t);
int (*get_event_idx)(int num_events, unsigned long used_mask,
unsigned long config);
int (*get_event_idx)(int, unsigned long);
int (*map_event)(uint32_t, uint64_t);
void (*enable_user_access_pmu_regs)(void);
void (*disable_user_access_pmu_regs)(void);
struct per_cpu_arm_pmu *per_cpu;
int num_events;
};
static inline const struct arm_pmu* get_cpu_pmu(void)
@ -39,21 +29,10 @@ static inline const struct arm_pmu* get_cpu_pmu(void)
extern struct arm_pmu cpu_pmu;
return &cpu_pmu;
}
static inline const struct per_cpu_arm_pmu *get_per_cpu_pmu(void)
{
const struct arm_pmu *cpu_pmu = get_cpu_pmu();
return &cpu_pmu->per_cpu[ihk_mc_get_processor_id()];
}
int arm64_init_perfctr(void);
void arm64_init_per_cpu_perfctr(void);
int arm64_enable_pmu(void);
void arm64_disable_pmu(void);
int armv8pmu_init(struct arm_pmu* cpu_pmu);
void armv8pmu_per_cpu_init(struct per_cpu_arm_pmu *per_cpu);
void arm64_enable_user_access_pmu_regs(void);
void arm64_disable_user_access_pmu_regs(void);
/* TODO[PMU]: 共通部に定義があっても良い。今後の動向を見てここの定義を削除する */
#endif

View File

@ -1,9 +1,7 @@
/* arch-timer.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* arch-timer.h COPYRIGHT FUJITSU LIMITED 2016 */
#ifndef __HEADER_ARM64_COMMON_ARCH_TIMER_H
#define __HEADER_ARM64_COMMON_ARCH_TIMER_H
#include <ihk/cpu.h>
/* @ref.impl include/clocksource/arm_arch_timer.h */
#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */
#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */
@ -13,19 +11,4 @@
#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
/* @ref.impl linux4.10.16 */
/* include/clocksource/arm_arch_timer.h */
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
enum arch_timer_reg {
ARCH_TIMER_REG_CTRL,
ARCH_TIMER_REG_TVAL,
};
extern int get_timer_intrid(void);
extern void arch_timer_init(void);
extern struct ihk_mc_interrupt_handler *get_timer_handler(void);
#endif /* __HEADER_ARM64_COMMON_ARCH_TIMER_H */

View File

@ -1,4 +1,4 @@
/* cpu.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* cpu.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
#ifndef __HEADER_ARM64_ARCH_CPU_H
#define __HEADER_ARM64_ARCH_CPU_H
@ -12,8 +12,6 @@
#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#include <registers.h>
#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)
@ -71,10 +69,12 @@ do { \
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
/* @ref.impl linux-linaro/arch/arm64/include/asm/arch_timer.h::arch_counter_get_cntvct */
#define read_tsc() \
({ \
unsigned long cval; \
cval = rdtsc(); \
isb(); \
asm volatile("mrs %0, cntvct_el0" : "=r" (cval)); \
cval; \
})

View File

@ -21,11 +21,12 @@
/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_FIRST_BLOCK (__PTL3_SHIFT << MAP_HUGE_SHIFT)
#define MAP_HUGE_FIRST_CONT_BLOCK (__PTL3_CONT_SHIFT << MAP_HUGE_SHIFT)
#define MAP_HUGE_SECOND_BLOCK (__PTL2_SHIFT << MAP_HUGE_SHIFT)
#define MAP_HUGE_SECOND_CONT_BLOCK (__PTL2_CONT_SHIFT << MAP_HUGE_SHIFT)
#define MAP_HUGE_THIRD_CONT_BLOCK (__PTL1_CONT_SHIFT << MAP_HUGE_SHIFT)
#if FIRST_LEVEL_BLOCK_SUPPORT
# define MAP_HUGE_FIRST_BLOCK (__PTL3_SHIFT << MAP_HUGE_SHIFT)
#else
# define MAP_HUGE_FIRST_BLOCK -1 /* not supported */
#endif
#define MAP_HUGE_SECOND_BLOCK (__PTL2_SHIFT << MAP_HUGE_SHIFT)
/*
* for mlockall()

View File

@ -6,11 +6,12 @@
/* shmflg */
#define SHM_HUGE_SHIFT 26
#define SHM_HUGE_FIRST_BLOCK (__PTL3_SHIFT << SHM_HUGE_SHIFT)
#define SHM_HUGE_FIRST_CONT_BLOCK (__PTL3_CONT_SHIFT << SHM_HUGE_SHIFT)
#define SHM_HUGE_SECOND_BLOCK (__PTL2_SHIFT << SHM_HUGE_SHIFT)
#define SHM_HUGE_SECOND_CONT_BLOCK (__PTL2_CONT_SHIFT << SHM_HUGE_SHIFT)
#define SHM_HUGE_THIRD_CONT_BLOCK (__PTL1_CONT_SHIFT << SHM_HUGE_SHIFT)
#if FIRST_LEVEL_BLOCK_SUPPORT
# define SHM_HUGE_FIRST_BLOCK (__PTL3_SHIFT << SHM_HUGE_SHIFT)
#else
# define SHM_HUGE_FIRST_BLOCK -1 /* not supported */
#endif
#define SHM_HUGE_SECOND_BLOCK (__PTL2_SHIFT << SHM_HUGE_SHIFT)
struct ipc_perm {
key_t key;

View File

@ -3,29 +3,30 @@
#include <arch-memory.h>
#define DEBUG_RUSAGE
//#define DEBUG_RUSAGE
#define IHK_OS_PGSIZE_4KB 0
#define IHK_OS_PGSIZE_2MB 1
#define IHK_OS_PGSIZE_1GB 2
extern struct rusage_global *rusage;
extern struct rusage_global rusage;
#define IHK_OS_PGSIZE_4KB 0
#define IHK_OS_PGSIZE_16KB 1
#define IHK_OS_PGSIZE_64KB 2
static inline int rusage_pgsize_to_pgtype(size_t pgsize)
{
int ret = IHK_OS_PGSIZE_4KB;
if (pgsize == PTL1_SIZE) {
switch (pgsize) {
case __PTL1_SIZE:
ret = IHK_OS_PGSIZE_4KB;
}
else if (pgsize == PTL2_SIZE) {
ret = IHK_OS_PGSIZE_2MB;
}
else if (pgsize == PTL3_SIZE) {
ret = IHK_OS_PGSIZE_1GB;
}
else {
break;
case __PTL2_SIZE:
ret = IHK_OS_PGSIZE_16KB;
break;
case __PTL3_SIZE:
ret = IHK_OS_PGSIZE_64KB;
break;
default:
kprintf("%s: Error: Unknown pgsize=%ld\n", __FUNCTION__, pgsize);
break;
}
return ret;
}

View File

@ -60,9 +60,9 @@
#ifdef CONFIG_HAS_NMI
#define GICD_INT_NMI_PRI 0x40
#define GICD_INT_DEF_PRI 0xc0U
#define GICD_INT_DEF_PRI 0xc0
#else
#define GICD_INT_DEF_PRI 0xa0U
#define GICD_INT_DEF_PRI 0xa0
#endif
#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
(GICD_INT_DEF_PRI << 16) |\

View File

@ -19,7 +19,6 @@
#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
#define __LINUX_IRQCHIP_ARM_GIC_V3_H
#include <stringify.h>
/* @ref.impl include/linux/irqchip/arm-gic-v3.h */
#include <sysreg.h>
@ -382,4 +381,11 @@
#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
/**
* @ref.impl host-kernel/include/linux/stringify.h
*/
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
#endif /* __LINUX_IRQCHIP_ARM_GIC_V3_H */

View File

@ -15,9 +15,8 @@
#define S_PC 0x100 /* offsetof(struct pt_regs, pc) */
#define S_PSTATE 0x108 /* offsetof(struct pt_regs, pstate) */
#define S_ORIG_X0 0x110 /* offsetof(struct pt_regs, orig_x0) */
#define S_ORIG_PC 0x118 /* offsetof(struct pt_regs, orig_pc) */
#define S_SYSCALLNO 0x120 /* offsetof(struct pt_regs, syscallno) */
#define S_FRAME_SIZE 0x130 /* sizeof(struct pt_regs) must be 16 byte align */
#define S_SYSCALLNO 0x118 /* offsetof(struct pt_regs, syscallno) */
#define S_FRAME_SIZE 0x120 /* sizeof(struct pt_regs) */
#define CPU_INFO_SETUP 0x10 /* offsetof(struct cpu_info, cpu_setup) */
#define CPU_INFO_SZ 0x18 /* sizeof(struct cpu_info) */

View File

@ -1,19 +0,0 @@
/* asm-syscall.h COPYRIGHT FUJITSU LIMITED 2018 */
#ifndef __HEADER_ARM64_ASM_SYSCALL_H
#define __HEADER_ARM64_ASM_SYSCALL_H
#ifdef __ASSEMBLY__
#define DECLARATOR(number, name) .equ __NR_##name, number
#define SYSCALL_HANDLED(number, name) DECLARATOR(number, name)
#define SYSCALL_DELEGATED(number, name) DECLARATOR(number, name)
#include <syscall_list.h>
#undef DECLARATOR
#undef SYSCALL_HANDLED
#undef SYSCALL_DELEGATED
#endif /* __ASSEMBLY__ */
#endif /* !__HEADER_ARM64_ASM_SYSCALL_H */

View File

@ -25,78 +25,17 @@
#define MIDR_PARTNUM(midr) \
(((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
#define MIDR_ARCHITECTURE_SHIFT 16
#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT)
#define MIDR_ARCHITECTURE(midr) \
(((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
#define MIDR_VARIANT_SHIFT 20
#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
#define MIDR_VARIANT(midr) \
(((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
#define MIDR_IMPLEMENTOR_SHIFT 24
#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT)
#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT)
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
#define MIDR_CPU_MODEL(imp, partnum) \
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
#define MIDR_CPU_VAR_REV(var, rev) \
(((var) << MIDR_VARIANT_SHIFT) | (rev))
#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
MIDR_ARCHITECTURE_MASK)
#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
({ \
u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
\
_model == (model) && rv >= (rv_min) && rv <= (rv_max); \
})
#define ARM_CPU_IMP_ARM 0x41
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_IMP_CAVIUM 0x43
#define ARM_CPU_IMP_BRCM 0x42
#define ARM_CPU_IMP_QCOM 0x51
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
#define ARM_CPU_PART_CORTEX_A57 0xD07
#define ARM_CPU_PART_CORTEX_A72 0xD08
#define ARM_CPU_PART_CORTEX_A53 0xD03
#define ARM_CPU_PART_CORTEX_A73 0xD09
#define ARM_CPU_PART_CORTEX_A75 0xD0A
#define APM_CPU_PART_POTENZA 0x000
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
#define BRCM_CPU_PART_VULCAN 0x516
#define QCOM_CPU_PART_FALKOR_V1 0x800
#define QCOM_CPU_PART_FALKOR 0xC00
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define ARM_CPU_IMP_CAVIUM 0x43
#ifndef __ASSEMBLY__

View File

@ -0,0 +1,92 @@
/* elfcore.h COPYRIGHT FUJITSU LIMITED 2015 */
#ifndef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
#ifndef __HEADER_ARM64_COMMON_ELFCORE_H
#define __HEADER_ARM64_COMMON_ELFCORE_H
typedef uint16_t Elf64_Half;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Xword;
typedef uint64_t Elf64_Addr;
typedef uint64_t Elf64_Off;
#define EI_NIDENT 16
typedef struct {
unsigned char e_ident[EI_NIDENT];
Elf64_Half e_type;
Elf64_Half e_machine;
Elf64_Word e_version;
Elf64_Addr e_entry;
Elf64_Off e_phoff;
Elf64_Off e_shoff;
Elf64_Word e_flags;
Elf64_Half e_ehsize;
Elf64_Half e_phentsize;
Elf64_Half e_phnum;
Elf64_Half e_shentsize;
Elf64_Half e_shnum;
Elf64_Half e_shstrndx;
} Elf64_Ehdr;
#define EI_MAG0 0
#define EI_MAG1 1
#define EI_MAG2 2
#define EI_MAG3 3
#define EI_CLASS 4
#define EI_DATA 5
#define EI_VERSION 6
#define EI_OSABI 7
#define EI_ABIVERSION 8
#define EI_PAD 9
#define ELFMAG0 0x7f
#define ELFMAG1 'E'
#define ELFMAG2 'L'
#define ELFMAG3 'F'
#define ELFCLASS64 2 /* 64-bit object */
#define ELFDATA2LSB 1 /* LSB */
#define El_VERSION 1 /* defined to be the same as EV CURRENT */
#define ELFOSABI_NONE 0 /* unspecied */
#define El_ABIVERSION_NONE 0 /* unspecied */
#define ET_CORE 4 /* Core file */
#define EM_X86_64 62 /* AMD x86-64 architecture */
#define EM_K10M 181 /* Intel K10M */
#define EV_CURRENT 1 /* Current version */
typedef struct {
Elf64_Word p_type;
Elf64_Word p_flags;
Elf64_Off p_offset;
Elf64_Addr p_vaddr;
Elf64_Addr p_paddr;
Elf64_Xword p_filesz;
Elf64_Xword p_memsz;
Elf64_Xword p_align;
} Elf64_Phdr;
#define PT_LOAD 1
#define PT_NOTE 4
#define PF_X 1 /* executable bit */
#define PF_W 2 /* writable bit */
#define PF_R 4 /* readable bit */
struct note {
Elf64_Word namesz;
Elf64_Word descsz;
Elf64_Word type;
/* name char[namesz] and desc[descsz] */
};
#define NT_PRSTATUS 1
#define NT_PRFRPREG 2
#define NT_PRPSINFO 3
#define NT_AUXV 6
#define NT_X86_STATE 0x202
#include "elfcoregpl.h"
#endif /* !__HEADER_ARM64_COMMON_ELFCORE_H */
#endif /* !POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -0,0 +1,98 @@
/* elfcoregpl.h COPYRIGHT FUJITSU LIMITED 2015 */
#ifndef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
#ifndef __HEADER_ARM64_COMMON_ELFCOREGPL_H
#define __HEADER_ARM64_COMMON_ELFCOREGPL_H
#define pid_t int
/* From /usr/include/linux/elfcore.h of Linux */
#define ELF_PRARGSZ (80)
/* From /usr/include/linux/elfcore.h fro Linux */
struct elf_siginfo
{
int si_signo;
int si_code;
int si_errno;
};
/* From bfd/hosts/x86-64linux.h of gdb. */
typedef uint64_t __attribute__ ((__aligned__ (8))) a8_uint64_t;
typedef a8_uint64_t elf_greg64_t;
struct user_regs64_struct
{
a8_uint64_t r15;
a8_uint64_t r14;
a8_uint64_t r13;
a8_uint64_t r12;
a8_uint64_t rbp;
a8_uint64_t rbx;
a8_uint64_t r11;
a8_uint64_t r10;
a8_uint64_t r9;
a8_uint64_t r8;
a8_uint64_t rax;
a8_uint64_t rcx;
a8_uint64_t rdx;
a8_uint64_t rsi;
a8_uint64_t rdi;
a8_uint64_t orig_rax;
a8_uint64_t rip;
a8_uint64_t cs;
a8_uint64_t eflags;
a8_uint64_t rsp;
a8_uint64_t ss;
a8_uint64_t fs_base;
a8_uint64_t gs_base;
a8_uint64_t ds;
a8_uint64_t es;
a8_uint64_t fs;
a8_uint64_t gs;
};
#define ELF_NGREG64 (sizeof (struct user_regs64_struct) / sizeof(elf_greg64_t))
typedef elf_greg64_t elf_gregset64_t[ELF_NGREG64];
struct prstatus64_timeval
{
a8_uint64_t tv_sec;
a8_uint64_t tv_usec;
};
struct elf_prstatus64
{
struct elf_siginfo pr_info;
short int pr_cursig;
a8_uint64_t pr_sigpend;
a8_uint64_t pr_sighold;
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct prstatus64_timeval pr_utime;
struct prstatus64_timeval pr_stime;
struct prstatus64_timeval pr_cutime;
struct prstatus64_timeval pr_cstime;
elf_gregset64_t pr_reg;
int pr_fpvalid;
};
struct elf_prpsinfo64
{
char pr_state;
char pr_sname;
char pr_zomb;
char pr_nice;
a8_uint64_t pr_flag;
unsigned int pr_uid;
unsigned int pr_gid;
int pr_pid, pr_ppid, pr_pgrp, pr_sid;
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
};
#endif /* !__HEADER_ARM64_COMMON_ELFCOREGPL_H */
#endif /* !POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -1,4 +1,5 @@
/* hwcap.h COPYRIGHT FUJITSU LIMITED 2017 */
#ifdef POSTK_DEBUG_ARCH_DEP_65
#ifndef _UAPI__ASM_HWCAP_H
#define _UAPI__ASM_HWCAP_H
@ -24,3 +25,4 @@ unsigned long arch_get_hwcap(void);
extern unsigned long elf_hwcap;
#endif /* _UAPI__ASM_HWCAP_H */
#endif /* POSTK_DEBUG_ARCH_DEP_65 */

View File

@ -1,4 +1,4 @@
/* context.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* context.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
#ifndef __HEADER_ARM64_IHK_CONTEXT_H
#define __HEADER_ARM64_IHK_CONTEXT_H
@ -27,9 +27,7 @@ struct pt_regs {
};
};
unsigned long orig_x0;
unsigned long orig_pc;
unsigned long syscallno;
unsigned long __padding;
};
typedef struct pt_regs ihk_mc_user_context_t;
@ -67,17 +65,17 @@ static inline void pt_regs_write_reg(struct pt_regs *regs, int r,
}
/* temp */
#define ihk_mc_syscall_arg0(uc) ((uc)->regs[0])
#define ihk_mc_syscall_arg1(uc) ((uc)->regs[1])
#define ihk_mc_syscall_arg2(uc) ((uc)->regs[2])
#define ihk_mc_syscall_arg3(uc) ((uc)->regs[3])
#define ihk_mc_syscall_arg4(uc) ((uc)->regs[4])
#define ihk_mc_syscall_arg5(uc) ((uc)->regs[5])
#define ihk_mc_syscall_arg0(uc) (uc)->regs[0]
#define ihk_mc_syscall_arg1(uc) (uc)->regs[1]
#define ihk_mc_syscall_arg2(uc) (uc)->regs[2]
#define ihk_mc_syscall_arg3(uc) (uc)->regs[3]
#define ihk_mc_syscall_arg4(uc) (uc)->regs[4]
#define ihk_mc_syscall_arg5(uc) (uc)->regs[5]
#define ihk_mc_syscall_ret(uc) ((uc)->regs[0])
#define ihk_mc_syscall_number(uc) ((uc)->regs[8])
#define ihk_mc_syscall_ret(uc) (uc)->regs[0]
#define ihk_mc_syscall_number(uc) (uc)->regs[8]
#define ihk_mc_syscall_pc(uc) ((uc)->pc)
#define ihk_mc_syscall_sp(uc) ((uc)->sp)
#define ihk_mc_syscall_pc(uc) (uc)->pc
#define ihk_mc_syscall_sp(uc) (uc)->sp
#endif /* !__HEADER_ARM64_IHK_CONTEXT_H */

View File

@ -20,11 +20,13 @@ typedef uint64_t size_t;
typedef int64_t ssize_t;
typedef int64_t off_t;
#ifdef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
typedef int32_t key_t;
typedef uint32_t uid_t;
typedef uint32_t gid_t;
typedef int64_t time_t;
typedef int32_t pid_t;
#endif /* POSTK_DEBUG_ARCH_DEP_18 */
#endif /* __ASSEMBLY__ */

View File

@ -1,102 +0,0 @@
/* imp-sysreg.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
#ifndef __ASM_IMP_SYSREG_H
#define __ASM_IMP_SYSREG_H
#ifndef __ASSEMBLY__
/* register sys_reg list */
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1 sys_reg(3, 0, 11, 2, 0)
#define IMP_SCCR_CTRL_EL1 sys_reg(3, 0, 11, 8, 0)
#define IMP_SCCR_ASSIGN_EL1 sys_reg(3, 0, 11, 8, 1)
#define IMP_SCCR_SET0_L2_EL1 sys_reg(3, 0, 15, 8, 2)
#define IMP_SCCR_SET1_L2_EL1 sys_reg(3, 0, 15, 8, 3)
#define IMP_SCCR_L1_EL0 sys_reg(3, 3, 11, 8, 2)
#define IMP_PF_CTRL_EL1 sys_reg(3, 0, 11, 4, 0)
#define IMP_PF_STREAM_DETECT_CTRL_EL0 sys_reg(3, 3, 11, 4, 0)
#define IMP_PF_INJECTION_CTRL0_EL0 sys_reg(3, 3, 11, 6, 0)
#define IMP_PF_INJECTION_CTRL1_EL0 sys_reg(3, 3, 11, 6, 1)
#define IMP_PF_INJECTION_CTRL2_EL0 sys_reg(3, 3, 11, 6, 2)
#define IMP_PF_INJECTION_CTRL3_EL0 sys_reg(3, 3, 11, 6, 3)
#define IMP_PF_INJECTION_CTRL4_EL0 sys_reg(3, 3, 11, 6, 4)
#define IMP_PF_INJECTION_CTRL5_EL0 sys_reg(3, 3, 11, 6, 5)
#define IMP_PF_INJECTION_CTRL6_EL0 sys_reg(3, 3, 11, 6, 6)
#define IMP_PF_INJECTION_CTRL7_EL0 sys_reg(3, 3, 11, 6, 7)
#define IMP_PF_INJECTION_DISTANCE0_EL0 sys_reg(3, 3, 11, 7, 0)
#define IMP_PF_INJECTION_DISTANCE1_EL0 sys_reg(3, 3, 11, 7, 1)
#define IMP_PF_INJECTION_DISTANCE2_EL0 sys_reg(3, 3, 11, 7, 2)
#define IMP_PF_INJECTION_DISTANCE3_EL0 sys_reg(3, 3, 11, 7, 3)
#define IMP_PF_INJECTION_DISTANCE4_EL0 sys_reg(3, 3, 11, 7, 4)
#define IMP_PF_INJECTION_DISTANCE5_EL0 sys_reg(3, 3, 11, 7, 5)
#define IMP_PF_INJECTION_DISTANCE6_EL0 sys_reg(3, 3, 11, 7, 6)
#define IMP_PF_INJECTION_DISTANCE7_EL0 sys_reg(3, 3, 11, 7, 7)
#define IMP_BARRIER_CTRL_EL1 sys_reg(3, 0, 11, 12, 0)
#define IMP_BARRIER_BST_BIT_EL1 sys_reg(3, 0, 11, 12, 4)
#define IMP_BARRIER_INIT_SYNC_BB0_EL1 sys_reg(3, 0, 15, 13, 0)
#define IMP_BARRIER_INIT_SYNC_BB1_EL1 sys_reg(3, 0, 15, 13, 1)
#define IMP_BARRIER_INIT_SYNC_BB2_EL1 sys_reg(3, 0, 15, 13, 2)
#define IMP_BARRIER_INIT_SYNC_BB3_EL1 sys_reg(3, 0, 15, 13, 3)
#define IMP_BARRIER_INIT_SYNC_BB4_EL1 sys_reg(3, 0, 15, 13, 4)
#define IMP_BARRIER_INIT_SYNC_BB5_EL1 sys_reg(3, 0, 15, 13, 5)
#define IMP_BARRIER_ASSIGN_SYNC_W0_EL1 sys_reg(3, 0, 15, 15, 0)
#define IMP_BARRIER_ASSIGN_SYNC_W1_EL1 sys_reg(3, 0, 15, 15, 1)
#define IMP_BARRIER_ASSIGN_SYNC_W2_EL1 sys_reg(3, 0, 15, 15, 2)
#define IMP_BARRIER_ASSIGN_SYNC_W3_EL1 sys_reg(3, 0, 15, 15, 3)
#define IMP_SOC_STANDBY_CTRL_EL1 sys_reg(3, 0, 11, 0, 0)
#define IMP_FJ_CORE_UARCH_CTRL_EL2 sys_reg(3, 4, 11, 0, 4)
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1 sys_reg(3, 0, 11, 0, 5)
/* macros */
#define PWR_REG_MASK(reg, feild) (((UL(1) << ((reg##_##feild##_MSB) - (reg##_##feild##_LSB) + 1)) - 1) << (reg##_##feild##_LSB))
/* IMP_FJ_TAG_ADDRESS_CTRL_EL1 */
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_TBO0_SHIFT (0)
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_SEC0_SHIFT (8)
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_PFE0_SHIFT (9)
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_TBO0_MASK (1UL << IMP_FJ_TAG_ADDRESS_CTRL_EL1_TBO0_SHIFT)
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_SEC0_MASK (1UL << IMP_FJ_TAG_ADDRESS_CTRL_EL1_SEC0_SHIFT)
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_PFE0_MASK (1UL << IMP_FJ_TAG_ADDRESS_CTRL_EL1_PFE0_SHIFT)
/* IMP_SCCR_CTRL_EL1 */
#define IMP_SCCR_CTRL_EL1_EL1AE_SHIFT (63)
#define IMP_SCCR_CTRL_EL1_EL1AE_MASK (1UL << IMP_SCCR_CTRL_EL1_EL1AE_SHIFT)
/* IMP_SCCR_SET0_L2_EL1 */
#define IMP_SCCR_SET0_L2_EL1_L2_SEC0_SHIFT (0)
/* IMP_PF_CTRL_EL1 */
#define IMP_PF_CTRL_EL1_EL1AE_ENABLE (1UL << 63)
#define IMP_PF_CTRL_EL1_EL0AE_ENABLE (1UL << 62)
/* IMP_BARRIER_CTRL_EL1 */
#define IMP_BARRIER_CTRL_EL1_EL1AE_ENABLE (1UL << 63)
#define IMP_BARRIER_CTRL_EL1_EL0AE_ENABLE (1UL << 62)
/* IMP_SOC_STANDBY_CTRL_EL1 */
#define IMP_SOC_STANDBY_CTRL_EL1_ECO_MODE_MSB 2
#define IMP_SOC_STANDBY_CTRL_EL1_ECO_MODE_LSB 2
#define IMP_SOC_STANDBY_CTRL_EL1_MODE_CHANGE_MSB 1
#define IMP_SOC_STANDBY_CTRL_EL1_MODE_CHANGE_LSB 1
#define IMP_SOC_STANDBY_CTRL_EL1_RETENTION_MSB 0
#define IMP_SOC_STANDBY_CTRL_EL1_RETENTION_LSB 0
#define IMP_SOC_STANDBY_CTRL_EL1_ECO_MODE PWR_REG_MASK(IMP_SOC_STANDBY_CTRL_EL1, ECO_MODE)
#define IMP_SOC_STANDBY_CTRL_EL1_MODE_CHANGE PWR_REG_MASK(IMP_SOC_STANDBY_CTRL_EL1, MODE_CHANGE)
#define IMP_SOC_STANDBY_CTRL_EL1_RETENTION PWR_REG_MASK(IMP_SOC_STANDBY_CTRL_EL1, RETENTION)
/* IMP_FJ_CORE_UARCH_RESTRECTION_EL1 */
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_FL_RESTRICT_TRANS_MSB 33
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_FL_RESTRICT_TRANS_LSB 33
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_ISSUE_RESTRICTION_MSB 9
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_ISSUE_RESTRICTION_LSB 8
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_EX_RESTRICTION_MSB 0
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_EX_RESTRICTION_LSB 0
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_FL_RESTRICT_TRANS PWR_REG_MASK(IMP_FJ_CORE_UARCH_RESTRECTION_EL1, FL_RESTRICT_TRANS)
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_ISSUE_RESTRICTION PWR_REG_MASK(IMP_FJ_CORE_UARCH_RESTRECTION_EL1, ISSUE_RESTRICTION)
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_EX_RESTRICTION PWR_REG_MASK(IMP_FJ_CORE_UARCH_RESTRECTION_EL1, EX_RESTRICTION)
void scdrv_registers_init(void);
void hpc_registers_init(void);
void vhbm_barrier_registers_init(void);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_IMP_SYSREG_H */

View File

@ -1,4 +1,4 @@
/* irq.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* irq.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
#ifndef __HEADER_ARM64_IRQ_H
#define __HEADER_ARM64_IRQ_H
@ -15,15 +15,42 @@
#define INTRID_CPU_STOP 3
#define INTRID_TLB_FLUSH 4
#define INTRID_STACK_TRACE 6
#define INTRID_MULTI_NMI 7
#define INTRID_MEMDUMP 7
/* use PPI interrupt number */
#define INTRID_PERF_OVF 23
#define INTRID_HYP_PHYS_TIMER 26 /* cnthp */
#define INTRID_VIRT_TIMER 27 /* cntv */
#define INTRID_HYP_VIRT_TIMER 28 /* cnthv */
#define INTRID_PHYS_TIMER 30 /* cntp */
/* timer intrid getter */
static int get_virt_timer_intrid(void)
{
#ifdef CONFIG_ARM64_VHE
unsigned long mmfr = read_cpuid(ID_AA64MMFR1_EL1);
if ((mmfr >> ID_AA64MMFR1_VHE_SHIFT) & 1UL) {
return INTRID_HYP_VIRT_TIMER;
}
#endif /* CONFIG_ARM64_VHE */
return INTRID_VIRT_TIMER;
}
static int get_phys_timer_intrid(void)
{
#ifdef CONFIG_ARM64_VHE
unsigned long mmfr = read_cpuid(ID_AA64MMFR1_EL1);
if ((mmfr >> ID_AA64MMFR1_VHE_SHIFT) & 1UL) {
return INTRID_HYP_PHYS_TIMER;
}
#endif /* CONFIG_ARM64_VHE */
return INTRID_PHYS_TIMER;
}
/* use timer checker */
extern unsigned long is_use_virt_timer(void);
/* Functions for GICv2 */
extern void gic_dist_init_gicv2(unsigned long dist_base_pa, unsigned long size);
extern void gic_cpu_init_gicv2(unsigned long cpu_base_pa, unsigned long size);

View File

@ -72,7 +72,6 @@
#define PMD_SECT_S (UL(3) << 8)
#define PMD_SECT_AF (UL(1) << 10)
#define PMD_SECT_NG (UL(1) << 11)
#define PMD_SECT_CONT (UL(1) << 52)
#define PMD_SECT_PXN (UL(1) << 53)
#define PMD_SECT_UXN (UL(1) << 54)
@ -94,7 +93,6 @@
#define PTE_SHARED (UL(3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF (UL(1) << 10) /* Access Flag */
#define PTE_NG (UL(1) << 11) /* nG */
#define PTE_CONT (UL(1) << 52) /* Contiguous range */
#define PTE_PXN (UL(1) << 53) /* Privileged XN */
#define PTE_UXN (UL(1) << 54) /* User XN */
/* Software defined PTE bits definition.*/

View File

@ -2,9 +2,6 @@
#ifndef __HEADER_ARM64_COMMON_PRCTL_H
#define __HEADER_ARM64_COMMON_PRCTL_H
#define PR_SET_THP_DISABLE 41
#define PR_GET_THP_DISABLE 42
/* arm64 Scalable Vector Extension controls */
#define PR_SVE_SET_VL 48 /* set task vector length */
#define PR_SVE_SET_VL_THREAD (1 << 1) /* set just this thread */

View File

@ -1,10 +1,9 @@
/* registers.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* registers.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
#ifndef __HEADER_ARM64_COMMON_REGISTERS_H
#define __HEADER_ARM64_COMMON_REGISTERS_H
#include <types.h>
#include <arch/cpu.h>
#include <sysreg.h>
#define RFLAGS_CF (1 << 0)
#define RFLAGS_PF (1 << 2)
@ -77,12 +76,15 @@ static unsigned long rdmsr(unsigned int index)
return 0;
}
/* @ref.impl linux4.10.16 */
/* arch/arm64/include/asm/arch_timer.h:arch_counter_get_cntvct() */
static inline unsigned long rdtsc(void)
/* @ref.impl linux-linaro/arch/arm64/include/asm/arch_timer.h::arch_counter_get_cntvct */
static unsigned long rdtsc(void)
{
unsigned long cval;
isb();
return read_sysreg(cntvct_el0);
asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
return cval;
}
static void set_perfctl(int counter, int event, int mask)

View File

@ -1,4 +1,4 @@
/* signal.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* signal.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
#ifndef __HEADER_ARM64_COMMON_SIGNAL_H
#define __HEADER_ARM64_COMMON_SIGNAL_H
@ -407,6 +407,8 @@ struct ucontext {
};
void arm64_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, int err);
void set_signal(int sig, void *regs, struct siginfo *info);
void check_signal(unsigned long rc, void *regs, int num);
void check_signal_irq_disabled(unsigned long rc, void *regs, int num);
#endif /* __HEADER_ARM64_COMMON_SIGNAL_H */

View File

@ -1,14 +1,17 @@
/* syscall_list.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* syscall_list.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
SYSCALL_DELEGATED(4, io_getevents)
SYSCALL_DELEGATED(17, getcwd)
SYSCALL_DELEGATED(22, epoll_pwait)
SYSCALL_DELEGATED(25, fcntl)
SYSCALL_HANDLED(29, ioctl)
SYSCALL_DELEGATED(35, unlinkat)
SYSCALL_DELEGATED(43, statfs)
SYSCALL_DELEGATED(44, fstatfs)
#ifdef POSTK_DEBUG_ARCH_DEP_62 /* Absorb the difference between open and openat args. */
SYSCALL_HANDLED(56, openat)
#else /* POSTK_DEBUG_ARCH_DEP_62 */
SYSCALL_DELEGATED(56, openat)
#endif /* POSTK_DEBUG_ARCH_DEP_62 */
SYSCALL_HANDLED(57, close)
SYSCALL_DELEGATED(61, getdents64)
SYSCALL_DELEGATED(62, lseek)
@ -113,8 +116,6 @@ SYSCALL_HANDLED(238, migrate_pages)
SYSCALL_HANDLED(239, move_pages)
#ifdef PERF_ENABLE
SYSCALL_HANDLED(241, perf_event_open)
#else // PERF_ENABLE
SYSCALL_DELEGATED(241, perf_event_open)
#endif // PERF_ENABLE
SYSCALL_HANDLED(260, wait4)
SYSCALL_HANDLED(270, process_vm_readv)
@ -141,8 +142,9 @@ SYSCALL_HANDLED(804, resume_threads)
SYSCALL_HANDLED(811, linux_spawn)
SYSCALL_DELEGATED(1024, open)
SYSCALL_DELEGATED(1026, unlink)
SYSCALL_DELEGATED(1035, readlink)
SYSCALL_HANDLED(1045, signalfd)
SYSCALL_DELEGATED(1049, stat)
SYSCALL_DELEGATED(1060, getpgrp)
SYSCALL_HANDLED(1062, time)
SYSCALL_DELEGATED(1062, time)

View File

@ -1,4 +1,4 @@
/* sysreg.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* sysreg.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
/*
* Macros for accessing system registers with older binutils.
*
@ -23,7 +23,6 @@
#include <types.h>
#include <stringify.h>
#include <ihk/types.h>
/*
* ARMv8 ARM reserves the following encoding for system registers:
@ -57,6 +56,12 @@
#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask)
#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask)
#ifdef __ASSEMBLY__
#define __emit_inst(x).inst (x)
#else
#define __emit_inst(x)".inst " __stringify((x)) "\n\t"
#endif
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@ -138,12 +143,6 @@
#define ID_AA64ISAR0_SHA1_SHIFT 8
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64isar1 */
#define ID_AA64ISAR1_LRCPC_SHIFT 20
#define ID_AA64ISAR1_FCMA_SHIFT 16
#define ID_AA64ISAR1_JSCVT_SHIFT 12
#define ID_AA64ISAR1_DPB_SHIFT 0
/* id_aa64pfr0 */
#define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_GIC_SHIFT 24
@ -179,14 +178,6 @@
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
#define ID_AA64MMFR0_TGRAN16_NI 0x0
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
#else
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
#endif
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_PAN_SHIFT 20
@ -273,46 +264,15 @@
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL (1UL << 31)
/* SYS_MIDR_EL1 */
//mask
#define SYS_MIDR_EL1_IMPLEMENTER_MASK (0xFFUL)
#define SYS_MIDR_EL1_PPNUM_MASK (0xFFFUL)
//shift
#define SYS_MIDR_EL1_IMPLEMENTER_SHIFT (24)
#define SYS_MIDR_EL1_PPNUM_SHIFT (0x4)
//val
#define SYS_MIDR_EL1_IMPLEMENTER_FJ (0x46)
#define SYS_MIDR_EL1_PPNUM_TCHIP (0x1)
#define READ_ACCESS (0)
#define WRITE_ACCESS (1)
#define ACCESS_REG_FUNC(name, reg) \
static void xos_access_##name(uint8_t flag, uint64_t *reg_value) \
{ \
if (flag == READ_ACCESS) { \
__asm__ __volatile__("mrs_s %0," __stringify(reg) "\n\t" \
:"=&r"(*reg_value)::); \
} \
else if (flag == WRITE_ACCESS) { \
__asm__ __volatile__("msr_s" __stringify(reg) ", %0\n\t" \
::"r"(*reg_value):); \
} else { \
; \
} \
}
#define XOS_FALSE (0)
#define XOS_TRUE (1)
#ifdef __ASSEMBLY__
#define __emit_inst(x).inst (x)
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
.equ .L__reg_num_x\num, \num
.endr
.equ .L__reg_num_xzr, 31
.macro mrs_s, rt, sreg
__emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
__emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
.endm
.macro msr_s, sreg, rt
@ -320,7 +280,7 @@
.endm
#else
#define __emit_inst(x)".inst " __stringify((x)) "\n\t"
asm(
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
" .equ .L__reg_num_x\\num, \\num\n"
@ -336,28 +296,6 @@ asm(
" .endm\n"
);
ACCESS_REG_FUNC(midr_el1, SYS_MIDR_EL1);
static int xos_is_tchip(void)
{
uint64_t reg = 0;
int ret = 0, impl = 0, part = 0;
xos_access_midr_el1(READ_ACCESS, &reg);
impl = (reg >> SYS_MIDR_EL1_IMPLEMENTER_SHIFT) &
SYS_MIDR_EL1_IMPLEMENTER_MASK;
part = (reg >> SYS_MIDR_EL1_PPNUM_SHIFT) & SYS_MIDR_EL1_PPNUM_MASK;
if ((impl == SYS_MIDR_EL1_IMPLEMENTER_FJ) &&
(part == SYS_MIDR_EL1_PPNUM_TCHIP)) {
ret = XOS_TRUE;
}
else {
ret = XOS_FALSE;
}
return ret;
}
#endif
/*
@ -398,6 +336,4 @@ static int xos_is_tchip(void)
/* @ref.impl arch/arm64/include/asm/kvm_arm.h */
#define CPTR_EL2_TZ (1 << 8)
#include "imp-sysreg.h"
#endif /* __ASM_SYSREG_H */

View File

@ -1,22 +1,15 @@
/* thread_info.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* thread_info.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
#ifndef __HEADER_ARM64_COMMON_THREAD_INFO_H
#define __HEADER_ARM64_COMMON_THREAD_INFO_H
#define MIN_KERNEL_STACK_SHIFT 15
#include <arch-memory.h>
#if (MIN_KERNEL_STACK_SHIFT < PAGE_SHIFT)
#define KERNEL_STACK_SHIFT PAGE_SHIFT
#else
#define KERNEL_STACK_SHIFT MIN_KERNEL_STACK_SHIFT
#endif
#define KERNEL_STACK_SIZE (UL(1) << KERNEL_STACK_SHIFT)
#define KERNEL_STACK_SIZE 32768 /* 8 page */
#define THREAD_START_SP KERNEL_STACK_SIZE - 16
#ifndef __ASSEMBLY__
#define ALIGN_UP(x, align) ALIGN_DOWN((x) + (align) - 1, align)
#define ALIGN_DOWN(x, align) ((x) & ~((align) - 1))
#include <process.h>
#include <prctl.h>
@ -60,8 +53,8 @@ struct thread_info {
struct arm64_cpu_local_thread {
struct thread_info thread_info;
unsigned long paniced;
uint64_t panic_regs[34];
unsigned long paniced; /* 136 */
uint64_t panic_regs[34]; /* 144 */
};
union arm64_cpu_local_variables {

View File

@ -1,22 +1,8 @@
/* virt.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
/* virt.h COPYRIGHT FUJITSU LIMITED 2015 */
#ifndef __HEADER_ARM64_COMMON_VIRT_H
#define __HEADER_ARM64_COMMON_VIRT_H
/* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/virt.h */
#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
#ifndef __ASSEMBLY__
#include <sysreg.h>
#include <ptrace.h>
/* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/virt.h */
static inline int is_kernel_in_hyp_mode(void)
{
return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
#endif /* !__ASSEMBLY__ */
#endif /* !__HEADER_ARM64_COMMON_VIRT_H */

View File

@ -1,15 +1,13 @@
/* irq-gic-v2.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* irq-gic-v2.c COPYRIGHT FUJITSU LIMITED 2015-2016 */
#include <ihk/cpu.h>
#include <irq.h>
#include <arm-gic-v2.h>
#include <io.h>
#include <arch/cpu.h>
#include <memory.h>
#include <affinity.h>
#include <syscall.h>
#include <debug.h>
#include <arch-timer.h>
#include <cls.h>
// #define DEBUG_GICV2
@ -54,11 +52,17 @@ static void arm64_raise_sgi_gicv2(unsigned int cpuid, unsigned int vector)
* arm64_raise_spi_gicv2
* @ref.impl nothing.
*/
extern unsigned int ihk_ikc_irq_apicid;
static void arm64_raise_spi_gicv2(unsigned int cpuid, unsigned int vector)
{
uint64_t spi_reg_offset;
uint32_t spi_set_pending_bitpos;
if (cpuid != ihk_ikc_irq_apicid) {
ekprintf("SPI(irq#%d) cannot send other than the host.\n", vector);
return;
}
/**
* calculates register offset and bit position corresponding to the numbers.
*
@ -105,9 +109,8 @@ extern int interrupt_from_user(void *);
void handle_interrupt_gicv2(struct pt_regs *regs)
{
unsigned int irqstat, irqnr;
const int from_user = interrupt_from_user(regs);
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
do {
// get GICC_IAR.InterruptID
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
@ -127,13 +130,7 @@ void handle_interrupt_gicv2(struct pt_regs *regs)
*/
break;
} while (1);
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
/* for migration by IPI */
if (get_this_cpu_local_var()->flags & CPU_FLAG_NEED_MIGRATE) {
schedule();
check_signal(0, regs, 0);
}
set_cputime(0);
}
void gic_dist_init_gicv2(unsigned long dist_base_pa, unsigned long size)
@ -150,6 +147,10 @@ void gic_enable_gicv2(void)
{
unsigned int enable_ppi_sgi = 0;
enable_ppi_sgi |= GICD_ENABLE << get_timer_intrid();
if (is_use_virt_timer()) {
enable_ppi_sgi |= GICD_ENABLE << get_virt_timer_intrid();
} else {
enable_ppi_sgi |= GICD_ENABLE << get_phys_timer_intrid();
}
writel_relaxed(enable_ppi_sgi, dist_base + GIC_DIST_ENABLE_SET);
}

View File

@ -1,4 +1,5 @@
/* irq-gic-v3.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* irq-gic-v3.c COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <irq.h>
#include <arm-gic-v2.h>
#include <arm-gic-v3.h>
@ -7,8 +8,6 @@
#include <process.h>
#include <syscall.h>
#include <debug.h>
#include <arch-timer.h>
#include <cls.h>
//#define DEBUG_GICV3
@ -265,7 +264,6 @@ static void arm64_raise_spi_gicv3(uint32_t cpuid, uint32_t vector)
static void arm64_raise_lpi_gicv3(uint32_t cpuid, uint32_t vector)
{
// @todo.impl
ekprintf("%s called.\n", __func__);
}
void arm64_issue_ipi_gicv3(uint32_t cpuid, uint32_t vector)
@ -283,7 +281,7 @@ void arm64_issue_ipi_gicv3(uint32_t cpuid, uint32_t vector)
// send LPI (allow only to host)
arm64_raise_lpi_gicv3(cpuid, vector);
} else {
ekprintf("#%d is bad irq number.\n", vector);
ekprintf("#%d is bad irq number.", vector);
}
}
@ -291,11 +289,10 @@ extern int interrupt_from_user(void *);
void handle_interrupt_gicv3(struct pt_regs *regs)
{
uint64_t irqnr;
const int from_user = interrupt_from_user(regs);
irqnr = gic_read_iar();
cpu_enable_nmi();
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
while (irqnr != ICC_IAR1_EL1_SPURIOUS) {
if ((irqnr < 1020) || (irqnr >= 8192)) {
gic_write_eoir(irqnr);
@ -303,51 +300,11 @@ void handle_interrupt_gicv3(struct pt_regs *regs)
}
irqnr = gic_read_iar();
}
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
/* for migration by IPI */
if (get_this_cpu_local_var()->flags & CPU_FLAG_NEED_MIGRATE) {
schedule();
check_signal(0, regs, 0);
}
}
static uint64_t gic_mpidr_to_affinity(unsigned long mpidr)
{
uint64_t aff;
aff = ((uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 0));
return aff;
}
static void init_spi_routing(uint32_t irq, uint32_t linux_cpu)
{
uint64_t spi_route_reg_val, spi_route_reg_offset;
if (irq < 32 || 1020 <= irq) {
ekprintf("%s: irq is not spi number. (irq=%d)\n",
__func__, irq);
return;
}
/* write to GICD_IROUTER */
spi_route_reg_offset = irq * 8;
spi_route_reg_val = gic_mpidr_to_affinity(cpu_logical_map(linux_cpu));
writeq_relaxed(spi_route_reg_val,
(void *)(dist_base + GICD_IROUTER +
spi_route_reg_offset));
set_cputime(0);
}
void gic_dist_init_gicv3(unsigned long dist_base_pa, unsigned long size)
{
extern int spi_table[];
extern int nr_spi_table;
int i;
dist_base = map_fixed_area(dist_base_pa, size, 1 /*non chachable*/);
#ifdef USE_CAVIUM_THUNDER_X
@ -356,14 +313,6 @@ void gic_dist_init_gicv3(unsigned long dist_base_pa, unsigned long size)
is_cavium_thunderx = 1;
}
#endif
/* initialize spi routing */
for (i = 0; i < nr_spi_table; i++) {
if (spi_table[i] == -1) {
continue;
}
init_spi_routing(spi_table[i], i);
}
}
void gic_cpu_init_gicv3(unsigned long cpu_base_pa, unsigned long size)
@ -400,23 +349,11 @@ void gic_enable_gicv3(void)
void *rd_sgi_base = rbase + 0x10000 /* SZ_64K */;
int i;
unsigned int enable_ppi_sgi = GICD_INT_EN_SET_SGI;
extern int ihk_param_nr_pmu_irq_affi;
extern int ihk_param_pmu_irq_affi[CONFIG_SMP_MAX_CORES];
enable_ppi_sgi |= GICD_ENABLE << get_timer_intrid();
if (0 < ihk_param_nr_pmu_irq_affi) {
for (i = 0; i < ihk_param_nr_pmu_irq_affi; i++) {
if ((0 <= ihk_param_pmu_irq_affi[i]) &&
(ihk_param_pmu_irq_affi[i] <
sizeof(enable_ppi_sgi) * BITS_PER_BYTE)) {
enable_ppi_sgi |= GICD_ENABLE <<
ihk_param_pmu_irq_affi[i];
}
}
}
else {
enable_ppi_sgi |= GICD_ENABLE << INTRID_PERF_OVF;
if (is_use_virt_timer()) {
enable_ppi_sgi |= GICD_ENABLE << get_virt_timer_intrid();
} else {
enable_ppi_sgi |= GICD_ENABLE << get_phys_timer_intrid();
}
/*
@ -429,10 +366,9 @@ void gic_enable_gicv3(void)
/*
* Set priority on PPI and SGI interrupts
*/
for (i = 0; i < 32; i += 4) {
for (i = 0; i < 32; i += 4)
writel_relaxed(GICD_INT_DEF_PRI_X4,
rd_sgi_base + GIC_DIST_PRI + i);
}
rd_sgi_base + GIC_DIST_PRI + i * 4 / 4);
/* sync wait */
gic_do_wait_for_rwp(rbase);
@ -468,12 +404,9 @@ void gic_enable_gicv3(void)
gic_write_bpr1(0);
/* Set specific IPI to NMI */
writeb_relaxed(GICD_INT_NMI_PRI,
rd_sgi_base + GIC_DIST_PRI + INTRID_CPU_STOP);
writeb_relaxed(GICD_INT_NMI_PRI,
rd_sgi_base + GIC_DIST_PRI + INTRID_MULTI_NMI);
writeb_relaxed(GICD_INT_NMI_PRI,
rd_sgi_base + GIC_DIST_PRI + INTRID_STACK_TRACE);
writeb_relaxed(GICD_INT_NMI_PRI, rd_sgi_base + GIC_DIST_PRI + INTRID_CPU_STOP);
writeb_relaxed(GICD_INT_NMI_PRI, rd_sgi_base + GIC_DIST_PRI + INTRID_MEMDUMP);
writeb_relaxed(GICD_INT_NMI_PRI, rd_sgi_base + GIC_DIST_PRI + INTRID_STACK_TRACE);
/* sync wait */
gic_do_wait_for_rwp(rbase);

View File

@ -1,4 +1,4 @@
/* local.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* local.c COPYRIGHT FUJITSU LIMITED 2015-2016 */
#include <cpulocal.h>
#include <ihk/atomic.h>
#include <ihk/mm.h>
@ -7,31 +7,24 @@
#include <registers.h>
#include <string.h>
#define LOCALS_SPAN (8 * PAGE_SIZE)
/* BSP initialized stack area */
union arm64_cpu_local_variables init_thread_info __attribute__((aligned(KERNEL_STACK_SIZE)));
/* BSP/AP idle stack pointer head */
static union arm64_cpu_local_variables *locals;
size_t arm64_cpu_local_variables_span = KERNEL_STACK_SIZE; /* for debugger */
size_t arm64_cpu_local_variables_span = LOCALS_SPAN; /* for debugger */
/* allocate & initialize BSP/AP idle stack */
void init_processors_local(int max_id)
{
int i = 0;
const int sz = (max_id + 1) * KERNEL_STACK_SIZE;
union arm64_cpu_local_variables *tmp;
const int npages = ((max_id + 1) *
(ALIGN_UP(KERNEL_STACK_SIZE, PAGE_SIZE) >>
PAGE_SHIFT));
if (npages < 1) {
panic("idle kernel stack allocation failed.");
}
/* allocate one more for alignment */
locals = ihk_mc_alloc_pages(npages, IHK_MC_AP_CRITICAL);
if (locals == NULL) {
panic("idle kernel stack allocation failed.");
}
locals = ihk_mc_alloc_pages(((sz + PAGE_SIZE - 1) / PAGE_SIZE), IHK_MC_AP_CRITICAL);
locals = (union arm64_cpu_local_variables *)ALIGN_UP((unsigned long)locals, KERNEL_STACK_SIZE);
/* clear struct process, struct process_vm, struct thread_info area */

File diff suppressed because it is too large Load Diff

View File

@ -19,7 +19,7 @@ int ihk_mc_ikc_init_first_local(struct ihk_ikc_channel_desc *channel,
memset(channel, 0, sizeof(struct ihk_ikc_channel_desc));
mikc_queue_pages = ((4 * num_processors * MASTER_IKCQ_PKTSIZE)
mikc_queue_pages = ((2 * num_processors * MASTER_IKCQ_PKTSIZE)
+ (PAGE_SIZE - 1)) / PAGE_SIZE;
/* Place both sides in this side */

View File

@ -1,4 +1,4 @@
/* perfctr.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* perfctr.c COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <arch-perfctr.h>
#include <ihk/perfctr.h>
#include <mc_perf_event.h>
@ -6,60 +6,32 @@
#include <ihk/debug.h>
#include <registers.h>
#include <string.h>
#include <ihk/mm.h>
#include <irq.h>
/*
* @ref.impl arch/arm64/kernel/perf_event.c
* Set at runtime when we know what CPU type we are.
*/
struct arm_pmu cpu_pmu;
extern int ihk_param_pmu_irq_affi[CONFIG_SMP_MAX_CORES];
extern int ihk_param_nr_pmu_irq_affi;
extern int ihk_param_pmu_irq_affiniry[CONFIG_SMP_MAX_CORES];
extern int ihk_param_nr_pmu_irq_affiniry;
int arm64_init_perfctr(void)
{
int ret;
int i;
int pages;
const struct ihk_mc_cpu_info *cpu_info;
memset(&cpu_pmu, 0, sizeof(cpu_pmu));
ret = armv8pmu_init(&cpu_pmu);
if (ret) {
if (!ret) {
return ret;
}
cpu_info = ihk_mc_get_cpu_info();
pages = (sizeof(struct per_cpu_arm_pmu) * cpu_info->ncpus +
PAGE_SIZE - 1) >> PAGE_SHIFT;
cpu_pmu.per_cpu = ihk_mc_alloc_pages(pages, IHK_MC_AP_NOWAIT);
if (cpu_pmu.per_cpu == NULL) {
return -ENOMEM;
}
memset(cpu_pmu.per_cpu, 0, pages * PAGE_SIZE);
if (0 < ihk_param_nr_pmu_irq_affi) {
for (i = 0; i < ihk_param_nr_pmu_irq_affi; i++) {
ret = ihk_mc_register_interrupt_handler(ihk_param_pmu_irq_affi[i],
cpu_pmu.handler);
if (ret) {
break;
}
}
}
else {
ret = ihk_mc_register_interrupt_handler(INTRID_PERF_OVF,
cpu_pmu.handler);
for (i = 0; i < ihk_param_nr_pmu_irq_affiniry; i++) {
ret = ihk_mc_register_interrupt_handler(ihk_param_pmu_irq_affiniry[i], cpu_pmu.handler);
}
return ret;
}
void arm64_init_per_cpu_perfctr(void)
{
armv8pmu_per_cpu_init(&cpu_pmu.per_cpu[ihk_mc_get_processor_id()]);
}
int arm64_enable_pmu(void)
{
int ret;
@ -75,21 +47,11 @@ void arm64_disable_pmu(void)
cpu_pmu.disable_pmu();
}
void arm64_enable_user_access_pmu_regs(void)
{
cpu_pmu.enable_user_access_pmu_regs();
}
void arm64_disable_user_access_pmu_regs(void)
{
cpu_pmu.disable_user_access_pmu_regs();
}
extern unsigned int *arm64_march_perfmap;
static int __ihk_mc_perfctr_init(int counter, uint32_t type, uint64_t config, int mode)
{
int ret = -1;
int ret;
unsigned long config_base = 0;
int mapping;
@ -99,17 +61,17 @@ static int __ihk_mc_perfctr_init(int counter, uint32_t type, uint64_t config, in
}
ret = cpu_pmu.disable_counter(counter);
if (ret < 0) {
if (!ret) {
return ret;
}
ret = cpu_pmu.enable_intens(counter);
if (ret < 0) {
if (!ret) {
return ret;
}
ret = cpu_pmu.set_event_filter(&config_base, mode);
if (ret) {
if (!ret) {
return ret;
}
config_base |= (unsigned long)mapping;
@ -133,47 +95,48 @@ int ihk_mc_perfctr_init(int counter, uint64_t config, int mode)
int ihk_mc_perfctr_start(unsigned long counter_mask)
{
int ret = 0, i;
int ret = 0;
int counter;
unsigned long counter_bit;
for (i = 0; i < sizeof(counter_mask) * BITS_PER_BYTE; i++) {
if (counter_mask & (1UL << i)) {
ret = cpu_pmu.enable_counter(i);
if (ret < 0) {
kprintf("%s: enable failed(idx=%d)\n",
__func__, i);
break;
}
}
for (counter = 0, counter_bit = 1;
counter_bit < counter_mask;
counter++, counter_bit <<= 1) {
if (!(counter_mask & counter_bit))
continue;
ret = cpu_pmu.enable_counter(counter_mask);
if (ret < 0)
break;
}
return ret;
return ret < 0 ? ret : 0;
}
int ihk_mc_perfctr_stop(unsigned long counter_mask, int flags)
int ihk_mc_perfctr_stop(unsigned long counter_mask)
{
int i = 0;
int ret = 0;
int counter;
unsigned long counter_bit;
for (i = 0; i < sizeof(counter_mask) * BITS_PER_BYTE; i++) {
if (!(counter_mask & (1UL << i)))
for (counter = 0, counter_bit = 1;
counter_bit < counter_mask;
counter++, counter_bit <<= 1) {
if (!(counter_mask & counter_bit))
continue;
int ret = 0;
ret = cpu_pmu.disable_counter(counter);
if (ret < 0)
break;
ret = cpu_pmu.disable_counter(i);
if (ret < 0) {
continue;
}
if (flags & IHK_MC_PERFCTR_DISABLE_INTERRUPT) {
// when ihk_mc_perfctr_start is called,
// ihk_mc_perfctr_init is also called so disable
// interrupt
ret = cpu_pmu.disable_intens(i);
if (ret < 0) {
continue;
}
}
// ihk_mc_perfctr_startが呼ばれるときには、
// init系関数が呼ばれるのでdisableにする。
ret = cpu_pmu.disable_intens(counter);
if (ret < 0)
break;
}
return 0;
return ret < 0 ? ret : 0;
}
int ihk_mc_perfctr_reset(int counter)
@ -183,7 +146,8 @@ int ihk_mc_perfctr_reset(int counter)
return 0;
}
int ihk_mc_perfctr_set(int counter, long val)
//int ihk_mc_perfctr_set(int counter, unsigned long val)
int ihk_mc_perfctr_set(int counter, long val) /* 0416_patchtemp */
{
// TODO[PMU]: 共通部でサンプリングレートの計算をして、設定するカウンタ値をvalに渡してくるようになると想定。サンプリングレートの扱いを見てから本実装。
uint32_t v = val;
@ -205,45 +169,17 @@ unsigned long ihk_mc_perfctr_read(int counter)
return count;
}
int ihk_mc_perfctr_alloc_counter(unsigned int *type, unsigned long *config,
unsigned long pmc_status)
//int ihk_mc_perfctr_alloc_counter(unsigned long pmc_status)
int ihk_mc_perfctr_alloc_counter(unsigned int *type, unsigned long *config, unsigned long pmc_status) /* 0416_patchtemp */
{
int ret;
if (*type == PERF_TYPE_HARDWARE) {
switch (*config) {
case PERF_COUNT_HW_INSTRUCTIONS:
ret = cpu_pmu.map_event(*type, *config);
if (ret < 0) {
return -1;
}
*type = PERF_TYPE_RAW;
break;
default:
// Unexpected config
return -1;
}
}
else if (*type != PERF_TYPE_RAW) {
return -1;
}
ret = cpu_pmu.get_event_idx(get_per_cpu_pmu()->num_events, pmc_status,
*config);
ret = cpu_pmu.get_event_idx(cpu_pmu.num_events, pmc_status);
return ret;
}
int ihk_mc_perf_counter_mask_check(unsigned long counter_mask)
/* 0416_patchtemp */
/* ihk_mc_perfctr_fixed_init() stub added. */
int ihk_mc_perfctr_fixed_init(int counter, int mode)
{
return 1;
}
int ihk_mc_perf_get_num_counters(void)
{
return cpu_pmu.per_cpu[ihk_mc_get_processor_id()].num_events;
}
int ihk_mc_perfctr_set_extra(struct mc_perf_event *event)
{
/* Nothing to do. */
return 0;
return -1;
}

File diff suppressed because it is too large Load Diff

View File

@ -30,7 +30,7 @@
*/
#if defined(CONFIG_HAS_NMI)
#include <arm-gic-v3.h>
ENTRY(__cpu_do_idle)
ENTRY(cpu_do_idle)
mrs x0, daif // save I bit
msr daifset, #2 // set I bit
mrs_s x1, ICC_PMR_EL1 // save PMR
@ -41,13 +41,13 @@ ENTRY(__cpu_do_idle)
msr_s ICC_PMR_EL1, x1 // restore PMR
msr daif, x0 // restore I bit
ret
ENDPROC(__cpu_do_idle)
ENDPROC(cpu_do_idle)
#else /* defined(CONFIG_HAS_NMI) */
ENTRY(__cpu_do_idle)
ENTRY(cpu_do_idle)
dsb sy // WFI may enter a low-power mode
wfi
ret
ENDPROC(__cpu_do_idle)
ENDPROC(cpu_do_idle)
#endif /* defined(CONFIG_HAS_NMI) */
/*

View File

@ -1,4 +1,4 @@
/* psci.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* psci.c COPYRIGHT FUJITSU LIMITED 2015-2016 */
/* @ref.impl arch/arm64/kernel/psci.c */
/*
* This program is free software; you can redistribute it and/or modify

View File

@ -1,4 +1,4 @@
/* ptrace.c COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* ptrace.c COPYRIGHT FUJITSU LIMITED 2016-2017 */
#include <errno.h>
#include <debug-monitors.h>
#include <hw_breakpoint.h>
@ -23,6 +23,7 @@
#define NOT_IMPLEMENTED() do { kprintf("%s is not implemented\n", __func__); while(1);} while(0)
extern void save_debugreg(unsigned long *debugreg);
extern unsigned long do_kill(struct thread *thread, int pid, int tid, int sig, struct siginfo *info, int ptracecont);
extern int interrupt_from_user(void *);
enum aarch64_regset {
@ -947,32 +948,23 @@ void ptrace_report_signal(struct thread *thread, int sig)
}
mcs_rwlock_writer_lock(&proc->update_lock, &lock);
if (!(thread->ptrace & PT_TRACED)) {
if(!(proc->ptrace & PT_TRACED)){
mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
return;
}
/* Transition thread state */
thread->exit_status = sig;
/* Transition thread state */
proc->status = PS_DELAY_TRACED;
thread->status = PS_TRACED;
thread->ptrace &= ~PT_TRACE_SYSCALL;
save_debugreg(thread->ptrace_debugreg);
proc->ptrace &= ~PT_TRACE_SYSCALL;
if (sig == SIGSTOP || sig == SIGTSTP ||
sig == SIGTTIN || sig == SIGTTOU) {
thread->signal_flags |= SIGNAL_STOP_STOPPED;
}
else {
thread->signal_flags &= ~SIGNAL_STOP_STOPPED;
}
if (thread == proc->main_thread) {
proc->status = PS_DELAY_TRACED;
parent_pid = proc->parent->pid;
}
else {
parent_pid = thread->report_proc->pid;
waitq_wakeup(&thread->report_proc->waitpid_q);
sig == SIGTTIN || sig == SIGTTOU) {
proc->signal_flags |= SIGNAL_STOP_STOPPED;
} else {
proc->signal_flags &= ~SIGNAL_STOP_STOPPED;
}
parent_pid = proc->parent->pid;
save_debugreg(thread->ptrace_debugreg);
mcs_rwlock_writer_unlock(&proc->update_lock, &lock);
memset(&info, '\0', sizeof info);

File diff suppressed because it is too large Load Diff

View File

@ -1,201 +0,0 @@
/* timer.c COPYRIGHT FUJITSU LIMITED 2018 */
#include <ihk/types.h>
#include <ihk/cpu.h>
#include <ihk/lock.h>
#include <sysreg.h>
#include <kmalloc.h>
#include <cls.h>
#include <cputype.h>
#include <irq.h>
#include <arch-timer.h>
#include <debug.h>
//#define DEBUG_PRINT_TIMER
#ifdef DEBUG_PRINT_TIMER
#undef DDEBUG_DEFAULT
#define DDEBUG_DEFAULT DDEBUG_PRINT
#endif
static unsigned int per_cpu_timer_val[NR_CPUS] = { 0 };
static int timer_intrid = INTRID_VIRT_TIMER;
static void arch_timer_virt_reg_write(enum arch_timer_reg reg, uint32_t val);
static void (*arch_timer_reg_write)(enum arch_timer_reg, uint32_t) =
arch_timer_virt_reg_write;
static uint32_t arch_timer_virt_reg_read(enum arch_timer_reg reg);
static uint32_t (*arch_timer_reg_read)(enum arch_timer_reg) =
arch_timer_virt_reg_read;
static void arch_timer_phys_reg_write(enum arch_timer_reg reg, uint32_t val)
{
switch (reg) {
case ARCH_TIMER_REG_CTRL:
write_sysreg(val, cntp_ctl_el0);
break;
case ARCH_TIMER_REG_TVAL:
write_sysreg(val, cntp_tval_el0);
break;
}
isb();
}
static void arch_timer_virt_reg_write(enum arch_timer_reg reg, uint32_t val)
{
switch (reg) {
case ARCH_TIMER_REG_CTRL:
write_sysreg(val, cntv_ctl_el0);
break;
case ARCH_TIMER_REG_TVAL:
write_sysreg(val, cntv_tval_el0);
break;
}
isb();
}
static uint32_t arch_timer_phys_reg_read(enum arch_timer_reg reg)
{
uint32_t val = 0;
switch (reg) {
case ARCH_TIMER_REG_CTRL:
val = read_sysreg(cntp_ctl_el0);
break;
case ARCH_TIMER_REG_TVAL:
val = read_sysreg(cntp_tval_el0);
break;
}
return val;
}
static uint32_t arch_timer_virt_reg_read(enum arch_timer_reg reg)
{
uint32_t val = 0;
switch (reg) {
case ARCH_TIMER_REG_CTRL:
val = read_sysreg(cntv_ctl_el0);
break;
case ARCH_TIMER_REG_TVAL:
val = read_sysreg(cntv_tval_el0);
break;
}
return val;
}
static void timer_handler(void *priv)
{
unsigned long ctrl;
const int cpu = ihk_mc_get_processor_id();
dkprintf("CPU%d: catch %s timer\n", cpu,
((timer_intrid == INTRID_PHYS_TIMER) ||
(timer_intrid == INTRID_HYP_PHYS_TIMER)) ?
"physical" : "virtual");
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
const unsigned int clocks = per_cpu_timer_val[cpu];
struct cpu_local_var *v = get_this_cpu_local_var();
unsigned long irqstate;
/* set resched flag */
irqstate = ihk_mc_spinlock_lock(&v->runq_lock);
v->flags |= CPU_FLAG_NEED_RESCHED;
ihk_mc_spinlock_unlock(&v->runq_lock, irqstate);
/* gen control register value */
ctrl &= ~ARCH_TIMER_CTRL_IT_STAT;
/* set timer re-enable for periodic */
arch_timer_reg_write(ARCH_TIMER_REG_TVAL, clocks);
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
}
}
static unsigned long is_use_virt_timer(void)
{
extern unsigned long ihk_param_use_virt_timer;
switch (ihk_param_use_virt_timer) {
case 0: /* physical */
case 1: /* virtual */
break;
default: /* invalid */
panic("PANIC: is_use_virt_timer(): timer select neither phys-timer nor virt-timer.\n");
break;
}
return ihk_param_use_virt_timer;
}
static struct ihk_mc_interrupt_handler timer_interrupt_handler = {
.func = timer_handler,
.priv = NULL,
};
/* other source use functions */
struct ihk_mc_interrupt_handler *get_timer_handler(void)
{
return &timer_interrupt_handler;
}
void
lapic_timer_enable(unsigned int clocks)
{
unsigned long ctrl = 0;
/* gen control register value */
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~(ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_IT_STAT);
arch_timer_reg_write(ARCH_TIMER_REG_TVAL, clocks);
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
per_cpu_timer_val[ihk_mc_get_processor_id()] = clocks;
}
void
lapic_timer_disable()
{
unsigned long ctrl = 0;
ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
per_cpu_timer_val[ihk_mc_get_processor_id()] = 0;
}
int get_timer_intrid(void)
{
return timer_intrid;
}
void arch_timer_init(void)
{
const unsigned long is_virt = is_use_virt_timer();
#ifdef CONFIG_ARM64_VHE
const unsigned long mmfr = read_cpuid(ID_AA64MMFR1_EL1);
#endif /* CONFIG_ARM64_VHE */
if (is_virt) {
timer_intrid = INTRID_VIRT_TIMER;
arch_timer_reg_write = arch_timer_virt_reg_write;
arch_timer_reg_read = arch_timer_virt_reg_read;
} else {
timer_intrid = INTRID_PHYS_TIMER;
arch_timer_reg_write = arch_timer_phys_reg_write;
arch_timer_reg_read = arch_timer_phys_reg_read;
}
#ifdef CONFIG_ARM64_VHE
if ((mmfr >> ID_AA64MMFR1_VHE_SHIFT) & 1UL) {
if (is_virt) {
timer_intrid = INTRID_HYP_VIRT_TIMER;
} else {
timer_intrid = INTRID_HYP_PHYS_TIMER;
}
}
#endif /* CONFIG_ARM64_VHE */
}

View File

@ -1,4 +1,4 @@
/* traps.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
/* traps.c COPYRIGHT FUJITSU LIMITED 2015-2017 */
#include <ihk/context.h>
#include <ihk/debug.h>
#include <traps.h>
@ -29,14 +29,12 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, struct siginfo *inf
*/
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
const int from_user = interrupt_from_user(regs);
// /* TODO: implement lazy context saving/restoring */
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(1);
// WARN_ON(1);
kprintf("WARNING: CPU: %d PID: %d Trapped FP/ASIMD access.\n",
ihk_mc_get_processor_id(), cpu_local_var(current)->proc->pid);
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(0);
}
/*
@ -53,9 +51,7 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
siginfo_t info;
unsigned int si_code = 0;
const int from_user = interrupt_from_user(regs);
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(1);
if (esr & FPEXC_IOF)
si_code = FPE_FLTINV;
@ -74,7 +70,7 @@ void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
info._sifields._sigfault.si_addr = (void*)regs->pc;
set_signal(SIGFPE, regs, &info);
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(0);
}
/* @ref.impl arch/arm64/kernel/traps.c */
@ -137,9 +133,8 @@ exit:
void do_undefinstr(struct pt_regs *regs)
{
siginfo_t info;
const int from_user = interrupt_from_user(regs);
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
if (call_undef_hook(regs) == 0) {
goto out;
@ -152,7 +147,7 @@ void do_undefinstr(struct pt_regs *regs)
arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
out:
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(0);
}
/*
@ -162,9 +157,7 @@ out:
void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
siginfo_t info;
const int from_user = interrupt_from_user(regs);
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
kprintf("entering bad_mode !! (regs:0x%p, reason:%d, esr:0x%x)\n", regs, reason, esr);
kprintf("esr Analyse:\n");
@ -180,5 +173,5 @@ void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
info._sifields._sigfault.si_addr = (void*)regs->pc;
arm64_notify_die("Oops - bad mode", regs, &info, 0);
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(0);
}

View File

@ -1,4 +1,4 @@
/* vdso.c COPYRIGHT FUJITSU LIMITED 2016-2018 */
/* vdso.c COPYRIGHT FUJITSU LIMITED 2016 */
/* @ref.impl arch/arm64/kernel/vdso.c */
#include <arch-memory.h>
@ -88,8 +88,26 @@ int arch_setup_vdso(void)
kprintf("Enable Host mapping vDSO.\n");
return 0;
}
kprintf("Enable McK mapping vDSO.\n");
panic("Only support host mapping vDSO");
if (memcmp(&vdso_start, "\177ELF", 4)) {
panic("vDSO is not a valid ELF object!\n");
}
vdso.vdso_npages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
dkprintf("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
vdso.vdso_npages + 1, vdso.vdso_npages, &vdso_start, 1L, &tod_data);
if (vdso.vdso_npages != 1) {
panic("vDSO is not a valid number of pages!\n");
}
vdso.vvar_phys = virt_to_phys((void *)&tod_data);
vdso.vdso_physlist[0] = virt_to_phys((void *)&vdso_start);
vdso.lbase = VDSO_LBASE;
vdso.offset_sigtramp = vdso_offset_sigtramp;
return 0;
}
static int get_free_area(struct process_vm *vm, size_t len, intptr_t hint,
@ -138,7 +156,6 @@ int arch_map_vdso(struct process_vm *vm)
unsigned long start, end;
unsigned long flag;
int ret;
struct vm_range *range;
vdso_text_len = vdso.vdso_npages << PAGE_SHIFT;
/* Be sure to map the data page */
@ -157,7 +174,7 @@ int arch_map_vdso(struct process_vm *vm)
flag = VR_REMOTE | VR_PROT_READ;
flag |= VRFLAG_PROT_TO_MAXPROT(flag);
ret = add_process_memory_range(vm, start, end, vdso.vvar_phys, flag,
NULL, 0, PAGE_SHIFT, &range);
NULL, 0, PAGE_SHIFT, NULL);
if (ret != 0){
dkprintf("ERROR: adding memory range for tod_data\n");
goto exit;
@ -169,7 +186,7 @@ int arch_map_vdso(struct process_vm *vm)
flag = VR_REMOTE | VR_PROT_READ | VR_PROT_EXEC;
flag |= VRFLAG_PROT_TO_MAXPROT(flag);
ret = add_process_memory_range(vm, start, end, vdso.vdso_physlist[0], flag,
NULL, 0, PAGE_SHIFT, &range);
NULL, 0, PAGE_SHIFT, NULL);
if (ret != 0) {
dkprintf("ERROR: adding memory range for vdso_text\n");

View File

@ -0,0 +1,33 @@
/* vdso.so.S COPYRIGHT FUJITSU LIMITED 2016 */
/* @ref.impl arch/arm64/kernel/vdso/vdso.S */
/*
* Copyright (C) 2012 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Will Deacon <will.deacon@arm.com>
*/
#include <arch-memory.h>
#include <vdso-so-path.h>
.section ".vdso.txet", "aw"
.globl vdso_start, vdso_end
.balign PAGE_SIZE
vdso_start:
.incbin VDSO_SO_PATH
.balign PAGE_SIZE
vdso_end:
.previous

View File

@ -0,0 +1,131 @@
# Makefile.in COPYRIGHT FUJITSU LIMITED 2016
# @ref.impl arch/arm64/kernel/vdso/Makefile
# Building a vDSO image for AArch64.
HOST_DIR=@KDIR@
HOST_CONFIG=$(HOST_DIR)/.config
HOST_KERNEL_CONFIG_ARM64_4K_PAGES=$(shell grep -E "^CONFIG_ARM64_4K_PAGES=y" $(HOST_CONFIG) | sed 's|CONFIG_ARM64_4K_PAGES=||g')
HOST_KERNEL_CONFIG_ARM64_16K_PAGES=$(shell grep -E "^CONFIG_ARM64_16K_PAGES=y" $(HOST_CONFIG) | sed 's|CONFIG_ARM64_16K_PAGES=||g')
HOST_KERNEL_CONFIG_ARM64_64K_PAGES=$(shell grep -E "^CONFIG_ARM64_64K_PAGES=y" $(HOST_CONFIG) | sed 's|CONFIG_ARM64_64K_PAGES=||g')
VDSOSRC = @abs_srcdir@
VDSOBUILD = @abs_builddir@
INCDIR = $(VDSOSRC)/../include
ECHO_SUFFIX = [VDSO]
VDSOOBJS := gettimeofday.o
DESTOBJS = $(addprefix $(VDSOBUILD)/, $(VDSOOBJS))
VDSOASMOBJS := note.o sigreturn.o
DESTASMOBJS = $(addprefix $(VDSOBUILD)/, $(VDSOASMOBJS))
$(if $(VDSOSRC),,$(error IHK output directory is not specified))
$(if $(TARGET),,$(error Target is not specified))
#CFLAGS := -nostdinc -mlittle-endian -Wall -mabi=lp64 -Wa,-gdwarf-2
CFLAGS := -nostdinc -mlittle-endian -Wall -Wa,-gdwarf-2
CFLAGS += -D__KERNEL__ -I$(SRC)/include
CFLAGS += -I$(SRC)/../lib/include -I$(INCDIR) -I$(IHKBASE)/smp/arm64/include
CFLAGS += $(foreach i, $(shell seq 1 100), $(addprefix -DPOSTK_DEBUG_ARCH_DEP_, $(i)))
CFLAGS += $(foreach i, $(shell seq 1 100), $(addprefix -DPOSTK_DEBUG_TEMP_FIX_, $(i)))
LDFLAGS := -nostdinc -mlittle-endian -Wall -Wundef -Wstrict-prototypes
LDFLAGS += -Wno-trigraphs -fno-strict-aliasing -fno-common
LDFLAGS += -Werror-implicit-function-declaration -Wno-format-security
#LDFLAGS += -std=gnu89 -mgeneral-regs-only -mabi=lp64 -O2
LDFLAGS += -std=gnu89 -mgeneral-regs-only -O2
LDFLAGS += -Wframe-larger-than=2048 -fno-stack-protector
LDFLAGS += -fno-delete-null-pointer-checks -Wno-unused-but-set-variable
LDFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
LDFLAGS += -fno-var-tracking-assignments -g -Wdeclaration-after-statement
LDFLAGS += -Wno-pointer-sign -fno-strict-overflow -fconserve-stack
LDFLAGS += -Werror=implicit-int -Werror=strict-prototypes -Werror=date-time
LDFLAGS += -shared -fno-common -fno-builtin -nostdlib
LDFLAGS += -Wl,-soname=linux-vdso.so.1 -Wl,--hash-style=sysv -Wl,-n -Wl,-T
LDFLAGS += --param=allow-store-data-races=0 -DCC_HAVE_ASM_GOTO
LDFLAGS += -D"KBUILD_STR(s)=\#s" -D"KBUILD_BASENAME=KBUILD_STR(vdso.so)"
LDFLAGS += -D"KBUILD_MODNAME=KBUILD_STR(vdso.so)" -D__KERNEL__
DEPSRCS = $(wildcard $(VDSOSRC)/*.c $(VDSOSRC)/*.S)
CFLAGS_lds := -E -P -C -U$(ARCH)
CFLAGS_lds += -nostdinc
CFLAGS_lds += -mlittle-endian
CFLAGS_lds += -D__KERNEL__
CFLAGS_lds += -D__ASSEMBLY__
CFLAGS_lds += -DLINKER_SCRIPT
CFLAGS_lds += -DVDSO_LBASE=0
ifeq ($(HOST_KERNEL_CONFIG_ARM64_4K_PAGES), y)
CFLAGS_lds += -DPAGE_SIZE=0x1000
endif
ifeq ($(HOST_KERNEL_CONFIG_ARM64_16K_PAGES), y)
CFLAGS_lds += -DPAGE_SIZE=0x4000
endif
ifeq ($(HOST_KERNEL_CONFIG_ARM64_64K_PAGES), y)
CFLAGS_lds += -DPAGE_SIZE=0x10000
endif
#load mckernel config (append CPPFLAGS)
include @abs_top_builddir@/../ihk/cokernel/$(TARGETDIR)/Makefile.predefines
default: all
.PHONY: all clean depend prepare
all: depend $(VDSOBUILD)/vdso.so $(VDSOBUILD)/../include/vdso-offsets.h $(VDSOBUILD)/../include/vdso-so-path.h
# Strip rule for the .so file
$(VDSOBUILD)/vdso.so: OBJCOPYFLAGS := -S
$(VDSOBUILD)/vdso.so: $(VDSOBUILD)/vdso.so.dbg
$(objcopy_cmd)
# Generate VDSO offsets using helper script
$(VDSOBUILD)/../include/vdso-offsets.h: $(VDSOBUILD)/vdso.so.dbg
$(call echo_cmd,VDSOSYM,$<)
@mkdir -p $(VDSOBUILD)/../include
@nm $< | sh $(VDSOSRC)/gen_vdso_offsets.sh | LC_ALL=C sort > $@
$(VDSOBUILD)/../include/vdso-so-path.h:
@echo "#define VDSO_SO_PATH \"@abs_builddir@/vdso.so\"" > $@
# Link rule for the .so file, .lds has to be first
$(VDSOBUILD)/vdso.so.dbg: $(VDSOBUILD)/vdso.lds $(DESTOBJS) $(DESTASMOBJS)
$(ld_cmd)
$(VDSOBUILD)/vdso.lds: $(VDSOSRC)/vdso.lds.S
$(lds_cmd)
clean:
$(rm_cmd) $(DESTOBJS) $(DESTASMOBJS) $(VDSOBUILD)/Makefile.dep $(VDSOBUILD)/vdso.* -r $(VDSOBUILD)/../include
depend: $(VDSOBUILD)/Makefile.dep
$(VDSOBUILD)/Makefile.dep:
$(call dep_cmd,$(DEPSRCS))
prepare:
@$(RM) $(VDSOBUILD)/Makefile.dep
-include $(VDSOBUILD)/Makefile.dep
# Actual build commands
ifeq ($(V),1)
echo_cmd =
submake = make
else
echo_cmd = @echo ' ($(TARGET))' $1 $(ECHO_SUFFIX) $2;
submake = make --no-print-directory
endif
cc_cmd = $(call echo_cmd,CC,$<)$(CC) $(CFLAGS) -c -o $@
ld_cmd = $(call echo_cmd,LD,$@)$(CC) $(LDFLAGS) $^ -o $@
dep_cmd = $(call echo_cmd,DEPEND,)$(CC) $(CFLAGS) -MM $1 > $@
rm_cmd = $(call echo_cmd,CLEAN,)$(RM)
objcopy_cmd = $(call echo_cmd,OBJCOPY,$<)$(OBJCOPY) $(OBJCOPYFLAGS) $< $@
lds_cmd = $(call echo_cmd,LDS,$<)$(CC) $(CFLAGS_lds) -c -o $@ $<
$(DESTOBJS):
$(cc_cmd) $(addprefix $(VDSOSRC)/, $(notdir $(@:.o=.c)))
$(DESTASMOBJS):
$(cc_cmd) $(addprefix $(VDSOSRC)/, $(notdir $(@:.o=.S))) -D__ASSEMBLY__

View File

@ -0,0 +1,17 @@
#!/bin/sh
# gen_vdso_offsets.sh COPYRIGHT FUJITSU LIMITED 2016
# @ref.impl arch/arm64/kernel/vdso/gen_vdso_offsets.sh
#
# Match symbols in the DSO that look like VDSO_*; produce a header file
# of constant offsets into the shared object.
#
# Doing this inside the Makefile will break the $(filter-out) function,
# causing Kbuild to rebuild the vdso-offsets header file every time.
#
# Author: Will Deacon <will.deacon@arm.com
#
LC_ALL=C
sed -n -e 's/^00*/0/' -e \
's/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'

View File

@ -0,0 +1,207 @@
/* gettimeofday.c COPYRIGHT FUJITSU LIMITED 2016 */
#include <affinity.h>
#include <arch-memory.h>
#include <time.h>
#include <syscall.h>
#include <registers.h>
#include <ihk/atomic.h>
extern int __kernel_gettimeofday(struct timeval *tv, void *tz);
static inline void cpu_pause_for_vsyscall(void)
{
asm volatile ("yield" ::: "memory");
return;
}
static inline void calculate_time_from_tsc(struct timespec *ts,
struct tod_data_s *tod_data)
{
long ver;
unsigned long current_tsc;
__time_t sec_delta;
long ns_delta;
for (;;) {
while ((ver = ihk_atomic64_read(&tod_data->version)) & 1) {
/* settimeofday() is in progress */
cpu_pause_for_vsyscall();
}
rmb();
*ts = tod_data->origin;
rmb();
if (ver == ihk_atomic64_read(&tod_data->version)) {
break;
}
/* settimeofday() has intervened */
cpu_pause_for_vsyscall();
}
current_tsc = rdtsc();
sec_delta = current_tsc / tod_data->clocks_per_sec;
ns_delta = NS_PER_SEC * (current_tsc % tod_data->clocks_per_sec)
/ tod_data->clocks_per_sec;
/* calc. of ns_delta overflows if clocks_per_sec exceeds 18.44 GHz */
ts->tv_sec += sec_delta;
ts->tv_nsec += ns_delta;
if (ts->tv_nsec >= NS_PER_SEC) {
ts->tv_nsec -= NS_PER_SEC;
++ts->tv_sec;
}
return;
}
static inline struct tod_data_s *get_tod_data_addr(void)
{
unsigned long addr;
asm volatile("adr %0, _tod_data\n"
: "=r" (addr)
:
: "memory");
return (struct tod_data_s *)addr;
}
/*
 * __kernel_gettimeofday - vDSO entry point for gettimeofday(2).
 *
 * @tv: timeval to fill, may be NULL.
 * @tz: timezone argument; a non-NULL @tz always forces the syscall path.
 *
 * Returns 0 on success.  On syscall failure it deliberately faults
 * (see below) instead of returning an error.
 */
int __kernel_gettimeofday(struct timeval *tv, void *tz)
{
	long ret;
	struct tod_data_s *tod_data;
	struct timespec ats;

	if(!tv && !tz) {
		/* nothing to do */
		return 0;
	}
	tod_data = get_tod_data_addr();

	/* Do it locally (no kernel entry) when the ToD page allows it */
	if (!tz && tod_data->do_local) {
		calculate_time_from_tsc(&ats, tod_data);
		tv->tv_sec = ats.tv_sec;
		tv->tv_usec = ats.tv_nsec / 1000;
		return 0;
	}

	/* Otherwise fall back to the real syscall (AArch64: number in w8,
	 * args in x0/x1, `svc #0`, result in x0) */
	asm volatile("mov w8, %w1\n"
		"mov x0, %2\n"
		"mov x1, %3\n"
		"svc #0\n"
		"mov %0, x0\n"
		: "=r" (ret)
		: "r" (__NR_gettimeofday), "r"(tv), "r"(tz)
		: "memory");
	if (ret) {
		/* Intentional fault on failure -- presumably because the
		 * vDSO cannot set errno; confirm against the libc caller. */
		*(int *)0 = 0; /* i.e. raise(SIGSEGV) */
	}
	return (int)ret;
}
/*
 * The IDs of the various system clocks (for POSIX.1b interval timers):
 * @ref.impl include/uapi/linux/time.h
 */
/* IDs 0-3 are kept here commented out for reference only -- presumably
 * already defined by the included <time.h>; confirm. */
// #define CLOCK_REALTIME 0
// #define CLOCK_MONOTONIC 1
// #define CLOCK_PROCESS_CPUTIME_ID 2
// #define CLOCK_THREAD_CPUTIME_ID 3
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
#define CLOCK_BOOTTIME 7
#define CLOCK_REALTIME_ALARM 8
#define CLOCK_BOOTTIME_ALARM 9
#define CLOCK_SGI_CYCLE 10 /* Hardware specific */
#define CLOCK_TAI 11

/* Resolutions reported by __kernel_clock_getres() below */
#define HIGH_RES_NSEC 1 /* nsec. */
#define CLOCK_REALTIME_RES HIGH_RES_NSEC
#define CLOCK_COARSE_RES ((NS_PER_SEC+CONFIG_HZ/2)/CONFIG_HZ) /* 10,000,000 nsec*/

typedef int clockid_t;
/*
 * __kernel_clock_gettime - vDSO entry point for clock_gettime(2).
 *
 * @clk_id: clock to read; only CLOCK_REALTIME can be computed locally.
 * @tp:     timespec to fill, may be NULL (treated as a no-op).
 *
 * Unlike __kernel_gettimeofday(), a syscall failure here is returned
 * to the caller rather than faulting.
 */
int __kernel_clock_gettime(clockid_t clk_id, struct timespec *tp)
{
	long ret;
	struct tod_data_s *tod_data;
	struct timespec ats;

	if (!tp) {
		/* nothing to do */
		return 0;
	}
	tod_data = get_tod_data_addr();

	/* Do it locally (no kernel entry) when the ToD page allows it */
	if (tod_data->do_local && clk_id == CLOCK_REALTIME) {
		calculate_time_from_tsc(&ats, tod_data);
		tp->tv_sec = ats.tv_sec;
		tp->tv_nsec = ats.tv_nsec;
		return 0;
	}

	/* Otherwise fall back to the real syscall */
	asm volatile("mov w8, %w1\n"
		"mov x0, %2\n"
		"mov x1, %3\n"
		"svc #0\n"
		"mov %0, x0\n"
		: "=r" (ret)
		: "r" (__NR_clock_gettime), "r"(clk_id), "r"(tp)
		: "memory");
	return (int)ret;
}
/*
 * __kernel_clock_getres - vDSO entry point for clock_getres(2).
 *
 * @clk_id: clock whose resolution is queried.
 * @res:    timespec to fill, may be NULL (treated as a no-op).
 *
 * High-resolution and coarse clocks are answered locally with fixed
 * constants; any other clock id falls through to the real syscall.
 * (Fix: removed the `break` statements that followed `return 0` --
 * they were unreachable dead code.)
 */
int __kernel_clock_getres(clockid_t clk_id, struct timespec *res)
{
	long ret;

	if (!res) {
		/* nothing to do */
		return 0;
	}

	switch (clk_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
		res->tv_sec = 0;
		res->tv_nsec = CLOCK_REALTIME_RES;
		return 0;
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
		res->tv_sec = 0;
		res->tv_nsec = CLOCK_COARSE_RES;
		return 0;
	default:
		break;
	}

	/* Otherwise fall back to the real syscall */
	asm volatile("mov w8, %w1\n"
		"mov x0, %2\n"
		"mov x1, %3\n"
		"svc #0\n"
		"mov %0, x0\n"
		: "=r" (ret)
		: "r" (__NR_clock_getres), "r"(clk_id), "r"(res)
		: "memory");
	return (int)ret;
}

View File

@ -0,0 +1,28 @@
/* note.S COPYRIGHT FUJITSU LIMITED 2016 */
/* @ref.impl arch/arm64/kernel/vdso/note.S */
/*
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
 * Here we can supply some information useful to userland.
 */
#include <elfnote.h>

/*
 * One note entry with name "McKernel", type 0.  NOTE(review): the third
 * argument "a" is presumably the section flags string (alloc), as in
 * the Linux ELFNOTE macros -- confirm against elfnote.h.
 */
ELFNOTE_START(McKernel, 0, "a")
	.long 0x10000		/* MCKERNEL_VERSION_CODE */
ELFNOTE_END

View File

@ -0,0 +1,39 @@
/* sigreturn.S COPYRIGHT FUJITSU LIMITED 2016 */
/* @ref.impl arch/arm64/kernel/vdso/sigreturn.S */
/*
 * Sigreturn trampoline for returning from a signal when the SA_RESTORER
 * flag is not set.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linkage.h>
#include "syscall.h"

	.text

/* NOTE(review): the nop before the entry point is kept from the
 * reference implementation -- presumably so that code inspecting the
 * instruction preceding the trampoline (unwinders/debuggers) sees a
 * valid instruction; confirm against arch/arm64/kernel/vdso. */
	nop
ENTRY(__kernel_rt_sigreturn)
	/* CFI: describe the signal frame so unwinders can step through */
	.cfi_startproc
	.cfi_signal_frame
	.cfi_def_cfa x29, 0
	.cfi_offset x29, 0 * 8
	.cfi_offset x30, 1 * 8
	/* invoke rt_sigreturn(2); syscall number comes from "syscall.h" */
	mov x8, #__NR_rt_sigreturn
	svc #0
	.cfi_endproc
ENDPROC(__kernel_rt_sigreturn)

View File

@ -0,0 +1,15 @@
/* syscall.h COPYRIGHT FUJITSU LIMITED 2016 */
#ifndef __HEADER_ARM64_VDSO_SYSCALL_H
#define __HEADER_ARM64_VDSO_SYSCALL_H

/*
 * Expand the project's syscall table into assembler constants:
 * each entry in <syscall_list.h> becomes `.equ __NR_<name>, <number>`,
 * making the __NR_* numbers usable from assembly sources (e.g. the
 * `mov x8, #__NR_rt_sigreturn` in sigreturn.S).  Handled and delegated
 * syscalls are treated identically here.
 */
#define DECLARATOR(number,name) .equ __NR_##name, number
#define SYSCALL_HANDLED(number,name) DECLARATOR(number,name)
#define SYSCALL_DELEGATED(number,name) DECLARATOR(number,name)

#include <syscall_list.h>

/* keep the helper macros from leaking to includers */
#undef DECLARATOR
#undef SYSCALL_HANDLED
#undef SYSCALL_DELEGATED

#endif /* !__HEADER_ARM64_VDSO_SYSCALL_H */

View File

@ -0,0 +1,96 @@
/* vdso.lds.S COPYRIGHT FUJITSU LIMITED 2016 */
/* @ref.impl arch/arm64/kernel/vdso/vdso.lds.S */
/*
 * GNU linker script for the VDSO library.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 * Heavily based on the vDSO linker scripts for other archs.
 */

OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
OUTPUT_ARCH(aarch64)

SECTIONS
{
	/* The time-of-day data page is mapped immediately below the vDSO
	 * text; get_tod_data_addr() in gettimeofday.c resolves this
	 * symbol PC-relatively at run time. */
	PROVIDE(_tod_data = . - PAGE_SIZE);
	. = VDSO_LBASE + SIZEOF_HEADERS;

	.hash : { *(.hash) } :text
	.gnu.hash : { *(.gnu.hash) }
	.dynsym : { *(.dynsym) }
	.dynstr : { *(.dynstr) }
	.gnu.version : { *(.gnu.version) }
	.gnu.version_d : { *(.gnu.version_d) }
	.gnu.version_r : { *(.gnu.version_r) }

	.note : { *(.note.*) } :text :note

	. = ALIGN(16);

	/* 0xd503201f is the AArch64 NOP encoding, used as inter-section fill */
	.text : { *(.text*) } :text =0xd503201f
	PROVIDE (__etext = .);
	PROVIDE (_etext = .);
	PROVIDE (etext = .);

	.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
	.eh_frame : { KEEP (*(.eh_frame)) } :text

	.dynamic : { *(.dynamic) } :text :dynamic

	.rodata : { *(.rodata*) } :text

	_end = .;
	PROVIDE(end = .);

	/DISCARD/ : {
		*(.note.GNU-stack)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
	}
}

/*
 * We must supply the ELF program headers explicitly to get just one
 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
 */
PHDRS
{
	text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
	dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
	note PT_NOTE FLAGS(4); /* PF_R */
	eh_frame_hdr PT_GNU_EH_FRAME;
}

/*
 * This controls what symbols we export from the DSO.
 */
VERSION
{
	LINUX_2.6.39 {
	global:
		__kernel_rt_sigreturn;
		__kernel_gettimeofday;
		__kernel_clock_gettime;
		__kernel_clock_getres;
	local: *;
	};
}

/*
 * Make the sigreturn code visible to the kernel.
 */
VDSO_sigtramp = __kernel_rt_sigreturn;

View File

@ -1,2 +1,2 @@
IHK_OBJS += cpu.o interrupt.o memory.o trampoline.o local.o context.o
IHK_OBJS += perfctr.o syscall.o vsyscall.o coredump.o
IHK_OBJS += perfctr.o syscall.o vsyscall.o

View File

@ -1,4 +1,4 @@
/* coredump.c COPYRIGHT FUJITSU LIMITED 2018 */
#ifdef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
#include <process.h>
#include <elfcore.h>
@ -55,3 +55,5 @@ void arch_fill_prstatus(struct elf_prstatus64 *prstatus, struct thread *thread,
prstatus->pr_fpvalid = 0; /* We assume no fp */
}
#endif /* POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -1,4 +1,3 @@
/* cpu.c COPYRIGHT FUJITSU LIMITED 2018 */
/**
* \file cpu.c
* License details are found in the file LICENSE.
@ -92,6 +91,7 @@ void x86_set_warm_reset(unsigned long ip, char *first_page_va);
void x86_init_perfctr(void);
int gettime_local_support = 0;
extern int ihk_mc_pt_print_pte(struct page_table *pt, void *virt);
extern int kprintf(const char *format, ...);
extern int interrupt_from_user(void *);
extern void perf_start(struct mc_perf_event *event);
@ -824,14 +824,11 @@ void call_ap_func(void (*next_func)(void))
next_func();
}
struct page_table *get_init_page_table(void);
void setup_x86_ap(void (*next_func)(void))
{
unsigned long rsp;
cpu_disable_interrupt();
ihk_mc_load_page_table(get_init_page_table());
assign_processor_id();
init_smp_processor();
@ -892,8 +889,7 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
lapic_ack();
++v->in_interrupt;
set_cputime(interrupt_from_user(regs) ?
CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
dkprintf("CPU[%d] got interrupt, vector: %d, RIP: 0x%lX\n",
ihk_mc_get_processor_id(), vector, regs->gpr.rip);
@ -1008,8 +1004,7 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
}
interrupt_exit(regs);
set_cputime(interrupt_from_user(regs) ?
CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(interrupt_from_user(regs)? 0: 1);
--v->in_interrupt;
@ -1022,8 +1017,7 @@ void handle_interrupt(int vector, struct x86_user_context *regs)
void gpe_handler(struct x86_user_context *regs)
{
set_cputime(interrupt_from_user(regs) ?
CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
kprintf("General protection fault (err: %lx, %lx:%lx)\n",
regs->gpr.error, regs->gpr.cs, regs->gpr.rip);
arch_show_interrupt_context(regs);
@ -1032,8 +1026,7 @@ void gpe_handler(struct x86_user_context *regs)
}
set_signal(SIGSEGV, regs, NULL);
interrupt_exit(regs);
set_cputime(interrupt_from_user(regs) ?
CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(interrupt_from_user(regs)? 0: 1);
panic("GPF");
}
@ -1043,8 +1036,7 @@ void debug_handler(struct x86_user_context *regs)
int si_code = 0;
struct siginfo info;
set_cputime(interrupt_from_user(regs) ?
CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
#ifdef DEBUG_PRINT_CPU
kprintf("debug exception (err: %lx, %lx:%lx)\n",
regs->gpr.error, regs->gpr.cs, regs->gpr.rip);
@ -1063,16 +1055,14 @@ void debug_handler(struct x86_user_context *regs)
info.si_code = si_code;
set_signal(SIGTRAP, regs, &info);
interrupt_exit(regs);
set_cputime(interrupt_from_user(regs) ?
CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(interrupt_from_user(regs)? 0: 1);
}
void int3_handler(struct x86_user_context *regs)
{
struct siginfo info;
set_cputime(interrupt_from_user(regs) ?
CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
set_cputime(interrupt_from_user(regs)? 1: 2);
#ifdef DEBUG_PRINT_CPU
kprintf("int3 exception (err: %lx, %lx:%lx)\n",
regs->gpr.error, regs->gpr.cs, regs->gpr.rip);
@ -1083,8 +1073,59 @@ void int3_handler(struct x86_user_context *regs)
info.si_code = TRAP_BRKPT;
set_signal(SIGTRAP, regs, &info);
interrupt_exit(regs);
set_cputime(interrupt_from_user(regs) ?
CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
set_cputime(interrupt_from_user(regs)? 0: 1);
}
void
unhandled_page_fault(struct thread *thread, void *fault_addr, void *regs)
{
const uintptr_t address = (uintptr_t)fault_addr;
struct process_vm *vm = thread->vm;
struct vm_range *range;
unsigned long irqflags;
unsigned long error = ((struct x86_user_context *)regs)->gpr.error;
irqflags = kprintf_lock();
__kprintf("Page fault for 0x%lx\n", address);
__kprintf("%s for %s access in %s mode (reserved bit %s set), "
"it %s an instruction fetch\n",
(error & PF_PROT ? "protection fault" : "no page found"),
(error & PF_WRITE ? "write" : "read"),
(error & PF_USER ? "user" : "kernel"),
(error & PF_RSVD ? "was" : "wasn't"),
(error & PF_INSTR ? "was" : "wasn't"));
range = lookup_process_memory_range(vm, address, address+1);
if (range) {
__kprintf("address is in range, flag: 0x%lx\n",
range->flag);
ihk_mc_pt_print_pte(vm->address_space->page_table, (void*)address);
} else {
__kprintf("address is out of range! \n");
}
kprintf_unlock(irqflags);
/* TODO */
ihk_mc_debug_show_interrupt_context(regs);
if (!(error & PF_USER)) {
panic("panic: kernel mode PF");
}
//dkprintf("now dump a core file\n");
//coredump(proc, regs);
#ifdef DEBUG_PRINT_MEM
{
uint64_t *sp = (void *)REGS_GET_STACK_POINTER(regs);
kprintf("*rsp:%lx,*rsp+8:%lx,*rsp+16:%lx,*rsp+24:%lx,\n",
sp[0], sp[1], sp[2], sp[3]);
}
#endif
return;
}
static void outb(uint8_t v, uint16_t port)
@ -1249,7 +1290,7 @@ void ihk_mc_set_page_fault_handler(void (*h)(void *, uint64_t, void *))
}
extern char trampoline_code_data[], trampoline_code_data_end[];
struct page_table *get_boot_page_table(void);
struct page_table *get_init_page_table(void);
unsigned long get_transit_page_table(void);
/* reusable, but not reentrant */
@ -1273,10 +1314,9 @@ void ihk_mc_boot_cpu(int cpuid, unsigned long pc)
memcpy(p, trampoline_code_data,
trampoline_code_data_end - trampoline_code_data);
p[1] = (unsigned long)virt_to_phys(get_boot_page_table());
p[1] = (unsigned long)virt_to_phys(get_init_page_table());
p[2] = (unsigned long)setup_x86_ap;
p[3] = pc;
p[4] = (unsigned long)get_x86_cpu_local_kstack(cpuid);
p[6] = (unsigned long)get_transit_page_table();
if (!p[6]) {
p[6] = p[1];
@ -1394,11 +1434,13 @@ long ihk_mc_show_cpuinfo(char *buf, size_t buf_size, unsigned long read_off, int
}
#endif /* POSTK_DEBUG_ARCH_DEP_42 */
#ifdef POSTK_DEBUG_ARCH_DEP_23 /* add arch dep. clone_thread() function */
void arch_clone_thread(struct thread *othread, unsigned long pc,
unsigned long sp, struct thread *nthread)
{
return;
}
#endif /* POSTK_DEBUG_ARCH_DEP_23 */
void ihk_mc_print_user_context(ihk_mc_user_context_t *uctx)
{
@ -1502,8 +1544,7 @@ void arch_print_pre_interrupt_stack(const struct x86_basic_regs *regs) {
__print_stack(rbp, regs->rip);
}
void arch_print_stack(void)
{
void arch_print_stack() {
struct stack *rbp;
__kprintf("Approximative stack trace:\n");
@ -1551,13 +1592,6 @@ return;
kprintf_unlock(irqflags);
}
void arch_cpu_stop(void)
{
while (1) {
cpu_halt();
}
}
/*@
@ behavior fs_base:
@ assumes type == IHK_ASR_X86_FS;

View File

@ -1,4 +1,4 @@
/* gencore.c COPYRIGHT FUJITSU LIMITED 2015-2019 */
#ifndef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
#include <ihk/debug.h>
#include <kmalloc.h>
#include <cls.h>
@ -6,6 +6,7 @@
#include <process.h>
#include <string.h>
#include <elfcore.h>
#include <debug.h>
#define align32(x) ((((x) + 3) / 4) * 4)
#define alignpage(x) ((((x) + (PAGE_SIZE) - 1) / (PAGE_SIZE)) * (PAGE_SIZE))
@ -23,8 +24,9 @@
#define GENCORE_RANGE_IS_INACCESSIBLE(range) \
((range->flag & (VR_RESERVED | VR_MEMTYPE_UC | VR_DONTDUMP)))
/* Generate a core file image, which consists of many chunks.
* Returns an allocated table, an entry of which is a pair of the address
/*
* Generate a core file image, which consists of many chunks.
* Returns an allocated table, an entry of which is a pair of the address
* of a chunk and its length.
*/
@ -41,14 +43,18 @@ void fill_elf_header(Elf64_Ehdr *eh, int segs)
eh->e_ident[EI_MAG1] = 'E';
eh->e_ident[EI_MAG2] = 'L';
eh->e_ident[EI_MAG3] = 'F';
eh->e_ident[EI_CLASS] = ELF_CLASS;
eh->e_ident[EI_DATA] = ELF_DATA;
eh->e_ident[EI_CLASS] = ELFCLASS64;
eh->e_ident[EI_DATA] = ELFDATA2LSB;
eh->e_ident[EI_VERSION] = El_VERSION;
eh->e_ident[EI_OSABI] = ELF_OSABI;
eh->e_ident[EI_ABIVERSION] = ELF_ABIVERSION;
eh->e_ident[EI_OSABI] = ELFOSABI_NONE;
eh->e_ident[EI_ABIVERSION] = El_ABIVERSION_NONE;
eh->e_type = ET_CORE;
eh->e_machine = ELF_ARCH;
#ifdef CONFIG_MIC
eh->e_machine = EM_K10M;
#else
eh->e_machine = EM_X86_64;
#endif
eh->e_version = EV_CURRENT;
eh->e_entry = 0; /* Do we really need this? */
eh->e_phoff = 64; /* fixed */
@ -69,32 +75,28 @@ void fill_elf_header(Elf64_Ehdr *eh, int segs)
int get_prstatus_size(void)
{
return sizeof(struct note) + align32(sizeof("CORE"))
return sizeof(struct note) + align32(sizeof("CORE"))
+ align32(sizeof(struct elf_prstatus64));
}
/**
* \brief Return the size of the prpsinfo entry of the NOTE segment.
*
*/
int get_prpsinfo_size(void)
{
return sizeof(struct note) + align32(sizeof("CORE"))
+ align32(sizeof(struct elf_prpsinfo64));
}
/**
* \brief Fill a prstatus structure.
*
* \param head A pointer to a note structure.
* \param proc A pointer to the current process structure.
* \param regs0 A pointer to a ihk_mc_user_context_t structure.
* \param thread A pointer to the current thread structure.
* \param regs0 A pointer to a x86_regs structure.
*/
void fill_prstatus(struct note *head, struct thread *thread, void *regs0)
{
void *name;
struct elf_prstatus64 *prstatus;
struct elf_prstatus64 *prstatus;
struct x86_user_context *uctx = regs0;
struct x86_basic_regs *regs = &uctx->gpr;
register unsigned long _r12 asm("r12");
register unsigned long _r13 asm("r13");
register unsigned long _r14 asm("r14");
register unsigned long _r15 asm("r15");
head->namesz = sizeof("CORE");
head->descsz = sizeof(struct elf_prstatus64);
@ -103,15 +105,68 @@ void fill_prstatus(struct note *head, struct thread *thread, void *regs0)
memcpy(name, "CORE", sizeof("CORE"));
prstatus = (struct elf_prstatus64 *)(name + align32(sizeof("CORE")));
arch_fill_prstatus(prstatus, thread, regs0);
/*
We ignore following entries for now.
struct elf_siginfo pr_info;
short int pr_cursig;
a8_uint64_t pr_sigpend;
a8_uint64_t pr_sighold;
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct prstatus64_timeval pr_utime;
struct prstatus64_timeval pr_stime;
struct prstatus64_timeval pr_cutime;
struct prstatus64_timeval pr_cstime;
*/
prstatus->pr_reg[0] = _r15;
prstatus->pr_reg[1] = _r14;
prstatus->pr_reg[2] = _r13;
prstatus->pr_reg[3] = _r12;
prstatus->pr_reg[4] = regs->rbp;
prstatus->pr_reg[5] = regs->rbx;
prstatus->pr_reg[6] = regs->r11;
prstatus->pr_reg[7] = regs->r10;
prstatus->pr_reg[8] = regs->r9;
prstatus->pr_reg[9] = regs->r8;
prstatus->pr_reg[10] = regs->rax;
prstatus->pr_reg[11] = regs->rcx;
prstatus->pr_reg[12] = regs->rdx;
prstatus->pr_reg[13] = regs->rsi;
prstatus->pr_reg[14] = regs->rdi;
prstatus->pr_reg[15] = regs->rax; /* ??? */
prstatus->pr_reg[16] = regs->rip;
prstatus->pr_reg[17] = regs->cs;
prstatus->pr_reg[18] = regs->rflags;
prstatus->pr_reg[19] = regs->rsp;
prstatus->pr_reg[20] = regs->ss;
prstatus->pr_reg[21] = rdmsr(MSR_FS_BASE);
prstatus->pr_reg[22] = rdmsr(MSR_GS_BASE);
/* There is no ds, es, fs and gs. */
prstatus->pr_fpvalid = 0; /* We assume no fp */
}
/**
* \brief Return the size of the prpsinfo entry of the NOTE segment.
*
*/
int get_prpsinfo_size(void)
{
return sizeof(struct note) + align32(sizeof("CORE"))
+ align32(sizeof(struct elf_prpsinfo64));
}
/**
* \brief Fill a prpsinfo structure.
*
* \param head A pointer to a note structure.
* \param proc A pointer to the current process structure.
* \param regs A pointer to a ihk_mc_user_context_t structure.
* \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure.
*/
void fill_prpsinfo(struct note *head, struct thread *thread, void *regs)
@ -129,18 +184,20 @@ void fill_prpsinfo(struct note *head, struct thread *thread, void *regs)
prpsinfo->pr_state = thread->status;
prpsinfo->pr_pid = thread->proc->pid;
/* TODO: Fill the following fields:
* char pr_sname;
* char pr_zomb;
* char pr_nice;
* a8_uint64_t pr_flag;
* unsigned int pr_uid;
* unsigned int pr_gid;
* int pr_ppid, pr_pgrp, pr_sid;
* char pr_fname[16];
* char pr_psargs[ELF_PRARGSZ];
*/
}
/*
We leave most of the fields unfilled.
char pr_sname;
char pr_zomb;
char pr_nice;
a8_uint64_t pr_flag;
unsigned int pr_uid;
unsigned int pr_gid;
int pr_ppid, pr_pgrp, pr_sid;
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
*/
}
/**
* \brief Return the size of the AUXV entry of the NOTE segment.
@ -149,7 +206,7 @@ void fill_prpsinfo(struct note *head, struct thread *thread, void *regs)
int get_auxv_size(void)
{
return sizeof(struct note) + align32(sizeof("CORE"))
return sizeof(struct note) + align32(sizeof("CORE"))
+ sizeof(unsigned long) * AUXV_LEN;
}
@ -157,8 +214,8 @@ int get_auxv_size(void)
* \brief Fill an AUXV structure.
*
* \param head A pointer to a note structure.
* \param proc A pointer to the current process structure.
* \param regs A pointer to a ihk_mc_user_context_t structure.
* \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure.
*/
void fill_auxv(struct note *head, struct thread *thread, void *regs)
@ -172,9 +229,8 @@ void fill_auxv(struct note *head, struct thread *thread, void *regs)
name = (void *) (head + 1);
memcpy(name, "CORE", sizeof("CORE"));
auxv = name + align32(sizeof("CORE"));
memcpy(auxv, thread->proc->saved_auxv,
sizeof(unsigned long) * AUXV_LEN);
}
memcpy(auxv, thread->proc->saved_auxv, sizeof(unsigned long) * AUXV_LEN);
}
/**
* \brief Return the size of the whole NOTE segment.
@ -191,8 +247,8 @@ int get_note_size(void)
* \brief Fill the NOTE segment.
*
* \param head A pointer to a note structure.
* \param proc A pointer to the current process structure.
* \param regs A pointer to a ihk_mc_user_context_t structure.
* \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure.
*/
void fill_note(void *note, struct thread *thread, void *regs)
@ -207,27 +263,34 @@ void fill_note(void *note, struct thread *thread, void *regs)
/**
* \brief Generate an image of the core file.
*
* \param proc A pointer to the current process structure.
* \param regs A pointer to a ihk_mc_user_context_t structure.
* \param thread A pointer to the current thread structure.
* \param regs A pointer to a x86_regs structure.
* \param coretable(out) An array of core chunks.
* \param chunks(out) Number of the entires of coretable.
*
* A core chunk is represented by a pair of a physical
* A core chunk is represented by a pair of a physical
* address of memory region and its size. If there are
* no corresponding physical address for a VM area
* no corresponding physical address for a VM area
* (an unallocated demand-paging page, e.g.), the address
* should be zero.
*/
int gencore(struct thread *thread, void *regs,
/*@
@ requires \valid(thread);
@ requires \valid(regs);
@ requires \valid(coretable);
@ requires \valid(chunks);
@ behavior success:
@ ensures \result == 0;
@ assigns coretable;
@ behavior failure:
@ ensures \result == -1;
@*/
int gencore(struct thread *thread, void *regs,
struct coretable **coretable, int *chunks)
{
struct coretable *ct = NULL;
#ifdef POSTK_DEBUG_TEMP_FIX_39
Elf64_Ehdr *eh = NULL;
#else
Elf64_Ehdr eh;
#endif /*POSTK_DEBUG_TEMP_FIX_39*/
Elf64_Phdr *ph = NULL;
void *note = NULL;
struct vm_range *range, *next;
@ -248,22 +311,20 @@ int gencore(struct thread *thread, void *regs,
while ((range = next)) {
next = next_process_memory_range(vm, range);
dkprintf("start:%lx end:%lx flag:%lx objoff:%lx\n",
dkprintf("start:%lx end:%lx flag:%lx objoff:%lx\n",
range->start, range->end, range->flag, range->objoff);
if (GENCORE_RANGE_IS_INACCESSIBLE(range)) {
continue;
}
/* We need a chunk for each page for a demand paging area.
* This can be optimized for spacial complexity but we would
* lose simplicity instead.
*/
This can be optimized for spacial complexity but we would
lose simplicity instead. */
if (range->flag & VR_DEMAND_PAGING) {
unsigned long p, phys;
int prevzero = 0;
for (p = range->start; p < range->end; p += PAGE_SIZE) {
if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
(void *)p, &phys) != 0) {
prevzero = 1;
} else {
@ -285,36 +346,18 @@ int gencore(struct thread *thread, void *regs,
{
struct vm_regions region = thread->vm->region;
dkprintf("text: %lx-%lx\n", region.text_start,
region.text_end);
dkprintf("data: %lx-%lx\n", region.data_start,
region.data_end);
dkprintf("text: %lx-%lx\n", region.text_start, region.text_end);
dkprintf("data: %lx-%lx\n", region.data_start, region.data_end);
dkprintf("brk: %lx-%lx\n", region.brk_start, region.brk_end);
dkprintf("map: %lx-%lx\n", region.map_start, region.map_end);
dkprintf("stack: %lx-%lx\n", region.stack_start,
region.stack_end);
dkprintf("user: %lx-%lx\n\n", region.user_start,
region.user_end);
dkprintf("stack: %lx-%lx\n", region.stack_start, region.stack_end);
dkprintf("user: %lx-%lx\n\n", region.user_start, region.user_end);
}
dkprintf("now generate a core file image\n");
#ifdef POSTK_DEBUG_TEMP_FIX_39
eh = kmalloc(sizeof(*eh), IHK_MC_AP_NOWAIT);
if (eh == NULL) {
dkprintf("could not alloc a elf header table.\n");
goto fail;
}
#ifdef POSTK_DEBUG_TEMP_FIX_63 /* Add core table and elf header initialization */
memset(eh, 0, sizeof(*eh));
#endif /* POSTK_DEBUG_TEMP_FIX_63 */
offset += sizeof(*eh);
fill_elf_header(eh, segs);
#else
offset += sizeof(eh);
fill_elf_header(&eh, segs);
#endif /* POSTK_DEBUG_TEMP_FIX_39 */
/* program header table */
phsize = sizeof(Elf64_Phdr) * segs;
@ -386,19 +429,10 @@ int gencore(struct thread *thread, void *regs,
dkprintf("could not alloc a coretable.\n");
goto fail;
}
#ifdef POSTK_DEBUG_TEMP_FIX_63 /* Add core table and elf header initialization */
memset(ct, 0, sizeof(*ct));
#endif /* POSTK_DEBUG_TEMP_FIX_63 */
#ifdef POSTK_DEBUG_TEMP_FIX_39
ct[0].addr = virt_to_phys(eh); /* ELF header */
ct[0].len = 64;
dkprintf("coretable[0]: %lx@%lx(%lx)\n", ct[0].len, ct[0].addr, eh);
#else
ct[0].addr = virt_to_phys(&eh); /* ELF header */
ct[0].len = 64;
ct[0].len = 64;
dkprintf("coretable[0]: %lx@%lx(%lx)\n", ct[0].len, ct[0].addr, &eh);
#endif /* POSTK_DEBUG_TEMP_FIX_39 */
ct[1].addr = virt_to_phys(ph); /* program header table */
ct[1].len = phsize;
@ -418,23 +452,22 @@ int gencore(struct thread *thread, void *regs,
if (GENCORE_RANGE_IS_INACCESSIBLE(range)) {
continue;
}
if (range->flag & VR_DEMAND_PAGING) {
/* Just an ad hoc kluge. */
unsigned long p, start, phys;
int prevzero = 0;
unsigned long size = 0;
for (start = p = range->start;
for (start = p = range->start;
p < range->end; p += PAGE_SIZE) {
if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
(void *)p, &phys) != 0) {
if (prevzero == 0) {
/* Start a new chunk */
/* We begin a new chunk */
size = PAGE_SIZE;
start = p;
} else {
/* Extend the previous chunk */
/* We extend the previous chunk */
size += PAGE_SIZE;
}
prevzero = 1;
@ -443,16 +476,15 @@ int gencore(struct thread *thread, void *regs,
/* Flush out an empty chunk */
ct[i].addr = 0;
ct[i].len = size;
dkprintf("coretable[%d]: %lx@%lx(%lx)\n",
i, ct[i].len,
ct[i].addr, start);
dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
ct[i].len, ct[i].addr, start);
i++;
}
ct[i].addr = phys;
ct[i].len = PAGE_SIZE;
dkprintf("coretable[%d]: %lx@%lx(%lx)\n",
i, ct[i].len, ct[i].addr, p);
dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
ct[i].len, ct[i].addr, p);
i++;
prevzero = 0;
}
@ -461,20 +493,17 @@ int gencore(struct thread *thread, void *regs,
/* An empty chunk */
ct[i].addr = 0;
ct[i].len = size;
dkprintf("coretable[%d]: %lx@%lx(%lx)\n",
i, ct[i].len, ct[i].addr, start);
dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
ct[i].len, ct[i].addr, start);
i++;
}
}
} else {
if ((thread->vm->region.user_start <= range->start) &&
(range->end <= thread->vm->region.user_end)) {
if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
(void *)range->start,
&phys) != 0) {
dkprintf("could not convert user "
"virtual address %lx "
"to physical address\n",
range->start);
if (ihk_mc_pt_virt_to_phys(thread->vm->address_space->page_table,
(void *)range->start, &phys) != 0) {
dkprintf("could not convert user virtual address %lx"
"to physical address", range->start);
goto fail;
}
} else {
@ -482,7 +511,7 @@ int gencore(struct thread *thread, void *regs,
}
ct[i].addr = phys;
ct[i].len = range->end - range->start;
dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
dkprintf("coretable[%d]: %lx@%lx(%lx)\n", i,
ct[i].len, ct[i].addr, range->start);
i++;
}
@ -491,10 +520,13 @@ int gencore(struct thread *thread, void *regs,
return 0;
fail:
kfree(ct);
kfree(ph);
kfree(note);
fail:
if (ct)
kfree(ct);
if (ph)
kfree(ph);
if (note)
kfree(note);
return -1;
}
@ -504,14 +536,15 @@ fail:
* \param coretable An array of core chunks.
*/
/*@
@ requires \valid(coretable);
@ assigns \nothing;
@*/
void freecore(struct coretable **coretable)
{
struct coretable *ct = *coretable;
kfree(phys_to_virt(ct[2].addr)); /* NOTE segment */
kfree(phys_to_virt(ct[1].addr)); /* ph */
#ifdef POSTK_DEBUG_TEMP_FIX_39
kfree(phys_to_virt(ct[0].addr)); /* eh */
#endif /*POSTK_DEBUG_TEMP_FIX_39*/
kfree(*coretable);
}
#endif /* !POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -7,7 +7,6 @@
#include <ihk/cpu.h>
#include <ihk/atomic.h>
#include <lwk/compiler.h>
#include "config.h"
//#define DEBUG_SPINLOCK
//#define DEBUG_MCS_RWLOCK
@ -183,11 +182,7 @@ typedef struct mcs_lock_node {
unsigned long locked;
struct mcs_lock_node *next;
unsigned long irqsave;
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_lock_node_t;
#else
} mcs_lock_node_t;
#endif
} __attribute__((aligned(64))) mcs_lock_node_t;
typedef mcs_lock_node_t mcs_lock_t;
@ -280,22 +275,14 @@ typedef struct mcs_rwlock_node {
char dmy1; // unused
char dmy2; // unused
struct mcs_rwlock_node *next;
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_rwlock_node_t;
#else
} mcs_rwlock_node_t;
#endif
} __attribute__((aligned(64))) mcs_rwlock_node_t;
typedef struct mcs_rwlock_node_irqsave {
#ifndef SPINLOCK_IN_MCS_RWLOCK
struct mcs_rwlock_node node;
#endif
unsigned long irqsave;
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_rwlock_node_irqsave_t;
#else
} mcs_rwlock_node_irqsave_t;
#endif
} __attribute__((aligned(64))) mcs_rwlock_node_irqsave_t;
typedef struct mcs_rwlock_lock {
#ifdef SPINLOCK_IN_MCS_RWLOCK
@ -304,11 +291,7 @@ typedef struct mcs_rwlock_lock {
struct mcs_rwlock_node reader; /* common reader lock */
struct mcs_rwlock_node *node; /* base */
#endif
#ifndef ENABLE_UBSAN
} __aligned(64) mcs_rwlock_lock_t;
#else
} mcs_rwlock_lock_t;
#endif
} __attribute__((aligned(64))) mcs_rwlock_lock_t;
static void
mcs_rwlock_init(struct mcs_rwlock_lock *lock)

View File

@ -1,4 +1,3 @@
/* arch-memory.h COPYRIGHT FUJITSU LIMITED 2018 */
/**
* \file arch-memomry.h
* License details are found in the file LICENSE.
@ -41,9 +40,8 @@
#define LARGE_PAGE_MASK (~((unsigned long)LARGE_PAGE_SIZE - 1))
#define LARGE_PAGE_P2ALIGN (LARGE_PAGE_SHIFT - PAGE_SHIFT)
#define USER_END 0x0000800000000000UL
#define LD_TASK_UNMAPPED_BASE 0x0000155555500000UL
#define TASK_UNMAPPED_BASE 0x00002AAAAAA00000UL
#define USER_END 0x0000800000000000UL
#define TASK_UNMAPPED_BASE 0x00002AAAAAA00000UL
/*
* Canonical negative addresses (i.e., the smallest kernel virtual address)
@ -162,10 +160,6 @@ typedef unsigned long pte_t;
#define PM_PRESENT PM_STATUS(4LL)
#define PM_SWAP PM_STATUS(2LL)
#define USER_STACK_PREPAGE_SIZE LARGE_PAGE_SIZE
#define USER_STACK_PAGE_MASK LARGE_PAGE_MASK
#define USER_STACK_PAGE_P2ALIGN LARGE_PAGE_P2ALIGN
#define USER_STACK_PAGE_SHIFT LARGE_PAGE_SHIFT
/* For easy conversion, it is better to be the same as architecture's ones */
enum ihk_mc_pt_attribute {
@ -338,93 +332,7 @@ static inline void pte_set_dirty(pte_t *ptep, size_t pgsize)
return;
}
static inline int pte_is_contiguous(pte_t *ptep)
{
return 0;
}
static inline int pgsize_is_contiguous(size_t pgsize)
{
return 0;
}
static inline int pgsize_to_tbllv(size_t pgsize)
{
switch (pgsize) {
case PTL1_SIZE: return 1;
case PTL2_SIZE: return 2;
case PTL3_SIZE: return 3;
case PTL4_SIZE: return 4;
default:
#if 0 /* XXX: workaround. cannot use panic() here */
panic("pgsize_to_tbllv");
#else
return 0;
#endif
}
return 0;
}
static inline size_t tbllv_to_pgsize(int level)
{
switch (level) {
case 1: return PTL1_SIZE;
case 2: return PTL2_SIZE;
case 3: return PTL3_SIZE;
case 4: return PTL4_SIZE;
default:
#if 0 /* XXX: workaround. cannot use panic() here */
panic("tbllv_to_pgsize");
#else
return 0;
#endif
}
return 0;
}
static inline size_t tbllv_to_contpgsize(int level)
{
return 0;
}
static inline int tbllv_to_contpgshift(int level)
{
return 0;
}
static inline pte_t *get_contiguous_head(pte_t *__ptep, size_t __pgsize)
{
return __ptep;
}
static inline pte_t *get_contiguous_tail(pte_t *__ptep, size_t __pgsize)
{
return __ptep;
}
static inline int split_contiguous_pages(pte_t *ptep, size_t pgsize)
{
return 0;
}
static inline int page_is_contiguous_head(pte_t *ptep, size_t pgsize)
{
return 0;
}
static inline int page_is_contiguous_tail(pte_t *ptep, size_t pgsize)
{
return 0;
}
struct page_table;
static inline void arch_adjust_allocate_page_size(struct page_table *pt,
uintptr_t fault_addr,
pte_t *ptep,
void **pgaddrp,
size_t *pgsizep)
{
}
void set_pte(pte_t *ppte, unsigned long phys, enum ihk_mc_pt_attribute attr);
pte_t *get_pte(struct page_table *pt, void *virt, enum ihk_mc_pt_attribute attr);

View File

@ -7,7 +7,7 @@
#define IHK_OS_PGSIZE_2MB 1
#define IHK_OS_PGSIZE_1GB 2
extern struct rusage_global rusage;
extern struct rusage_global *rusage;
static inline int rusage_pgsize_to_pgtype(size_t pgsize)
{

View File

@ -50,7 +50,6 @@ struct x86_cpu_local_variables {
struct x86_cpu_local_variables *get_x86_cpu_local_variable(int id);
struct x86_cpu_local_variables *get_x86_this_cpu_local(void);
void *get_x86_cpu_local_kstack(int id);
void *get_x86_this_cpu_kstack(void);

View File

@ -1,4 +1,4 @@
/* elf.h COPYRIGHT FUJITSU LIMITED 2018 */
#ifdef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
#ifndef __HEADER_X86_COMMON_ELF_H
#define __HEADER_X86_COMMON_ELF_H
@ -56,3 +56,4 @@ struct user_regs64_struct
typedef elf_greg64_t elf_gregset64_t[ELF_NGREG64];
#endif /* __HEADER_S64FX_COMMON_ELF_H */
#endif /* !POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -0,0 +1,94 @@
#ifndef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
/*
* Structures and definitions for ELF core file.
* Extracted from
* System V Application Binary Interface - DRAFT - 10 June 2013,
* http://www.sco.com/developers/gabi/latest/contents.html
*/
typedef uint16_t Elf64_Half;
typedef uint32_t Elf64_Word;
typedef uint64_t Elf64_Xword;
typedef uint64_t Elf64_Addr;
typedef uint64_t Elf64_Off;
#define EI_NIDENT 16
typedef struct {
unsigned char e_ident[EI_NIDENT];
Elf64_Half e_type;
Elf64_Half e_machine;
Elf64_Word e_version;
Elf64_Addr e_entry;
Elf64_Off e_phoff;
Elf64_Off e_shoff;
Elf64_Word e_flags;
Elf64_Half e_ehsize;
Elf64_Half e_phentsize;
Elf64_Half e_phnum;
Elf64_Half e_shentsize;
Elf64_Half e_shnum;
Elf64_Half e_shstrndx;
} Elf64_Ehdr;
#define EI_MAG0 0
#define EI_MAG1 1
#define EI_MAG2 2
#define EI_MAG3 3
#define EI_CLASS 4
#define EI_DATA 5
#define EI_VERSION 6
#define EI_OSABI 7
#define EI_ABIVERSION 8
#define EI_PAD 9
#define ELFMAG0 0x7f
#define ELFMAG1 'E'
#define ELFMAG2 'L'
#define ELFMAG3 'F'
#define ELFCLASS64 2 /* 64-bit object */
#define ELFDATA2LSB 1 /* LSB */
#define El_VERSION 1 /* defined to be the same as EV CURRENT */
#define ELFOSABI_NONE 0 /* unspecied */
#define El_ABIVERSION_NONE 0 /* unspecied */
#define ET_CORE 4 /* Core file */
#define EM_X86_64 62 /* AMD x86-64 architecture */
#define EM_K10M 181 /* Intel K10M */
#define EV_CURRENT 1 /* Current version */
typedef struct {
Elf64_Word p_type;
Elf64_Word p_flags;
Elf64_Off p_offset;
Elf64_Addr p_vaddr;
Elf64_Addr p_paddr;
Elf64_Xword p_filesz;
Elf64_Xword p_memsz;
Elf64_Xword p_align;
} Elf64_Phdr;
#define PT_LOAD 1
#define PT_NOTE 4
#define PF_X 1 /* executable bit */
#define PF_W 2 /* writable bit */
#define PF_R 4 /* readable bit */
struct note {
Elf64_Word namesz;
Elf64_Word descsz;
Elf64_Word type;
/* name char[namesz] and desc[descsz] */
};
#define NT_PRSTATUS 1
#define NT_PRFRPREG 2
#define NT_PRPSINFO 3
#define NT_AUXV 6
#define NT_X86_STATE 0x202
#include "elfcoregpl.h"
#endif /* !POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -0,0 +1,96 @@
#ifndef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
/*
* Structures and defines from GPLed file.
*/
#define pid_t int
/* From /usr/include/linux/elfcore.h of Linux */
#define ELF_PRARGSZ (80)
/* From /usr/include/linux/elfcore.h fro Linux */
struct elf_siginfo
{
int si_signo;
int si_code;
int si_errno;
};
/* From bfd/hosts/x86-64linux.h of gdb. */
typedef uint64_t __attribute__ ((__aligned__ (8))) a8_uint64_t;
typedef a8_uint64_t elf_greg64_t;
struct user_regs64_struct
{
a8_uint64_t r15;
a8_uint64_t r14;
a8_uint64_t r13;
a8_uint64_t r12;
a8_uint64_t rbp;
a8_uint64_t rbx;
a8_uint64_t r11;
a8_uint64_t r10;
a8_uint64_t r9;
a8_uint64_t r8;
a8_uint64_t rax;
a8_uint64_t rcx;
a8_uint64_t rdx;
a8_uint64_t rsi;
a8_uint64_t rdi;
a8_uint64_t orig_rax;
a8_uint64_t rip;
a8_uint64_t cs;
a8_uint64_t eflags;
a8_uint64_t rsp;
a8_uint64_t ss;
a8_uint64_t fs_base;
a8_uint64_t gs_base;
a8_uint64_t ds;
a8_uint64_t es;
a8_uint64_t fs;
a8_uint64_t gs;
};
#define ELF_NGREG64 (sizeof (struct user_regs64_struct) / sizeof(elf_greg64_t))
typedef elf_greg64_t elf_gregset64_t[ELF_NGREG64];
struct prstatus64_timeval
{
a8_uint64_t tv_sec;
a8_uint64_t tv_usec;
};
struct elf_prstatus64
{
struct elf_siginfo pr_info;
short int pr_cursig;
a8_uint64_t pr_sigpend;
a8_uint64_t pr_sighold;
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct prstatus64_timeval pr_utime;
struct prstatus64_timeval pr_stime;
struct prstatus64_timeval pr_cutime;
struct prstatus64_timeval pr_cstime;
elf_gregset64_t pr_reg;
int pr_fpvalid;
};
struct elf_prpsinfo64
{
char pr_state;
char pr_sname;
char pr_zomb;
char pr_nice;
a8_uint64_t pr_flag;
unsigned int pr_uid;
unsigned int pr_gid;
int pr_pid, pr_ppid, pr_pgrp, pr_sid;
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
};
#endif /* !POSTK_DEBUG_ARCH_DEP_18 */

View File

@ -1,4 +1,5 @@
/* hwcap.h COPYRIGHT FUJITSU LIMITED 2017-2018 */
/* hwcap.h COPYRIGHT FUJITSU LIMITED 2017 */
#ifdef POSTK_DEBUG_ARCH_DEP_65
#ifndef _UAPI__ASM_HWCAP_H
#define _UAPI__ASM_HWCAP_H
@ -8,3 +9,4 @@ static unsigned long arch_get_hwcap(void)
}
#endif /* _UAPI__ASM_HWCAP_H */
#endif /* POSTK_DEBUG_ARCH_DEP_65 */

View File

@ -1,4 +1,3 @@
/* types.h COPYRIGHT FUJITSU LIMITED 2018 */
/**
* \file types.h
* Licence details are found in the file LICENSE.
@ -30,11 +29,13 @@ typedef uint64_t size_t;
typedef int64_t ssize_t;
typedef int64_t off_t;
#ifdef POSTK_DEBUG_ARCH_DEP_18 /* coredump arch separation. */
typedef int32_t key_t;
typedef uint32_t uid_t;
typedef uint32_t gid_t;
typedef int64_t time_t;
typedef int32_t pid_t;
#endif /* POSTK_DEBUG_ARCH_DEP_18 */
#define NULL ((void *)0)

View File

@ -9,9 +9,6 @@
#ifndef __ARCH_PRCTL_H
#define __ARCH_PRCTL_H
#define PR_SET_THP_DISABLE 41
#define PR_GET_THP_DISABLE 42
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003

View File

@ -1,4 +1,4 @@
/* syscall_list.h COPYRIGHT FUJITSU LIMITED 2017-2018 */
/* syscall_list.h COPYRIGHT FUJITSU LIMITED 2017 */
/**
* \file syscall_list.h
* License details are found in the file LICENSE.
@ -109,7 +109,6 @@ SYSCALL_HANDLED(149, mlock)
SYSCALL_HANDLED(150, munlock)
SYSCALL_HANDLED(151, mlockall)
SYSCALL_HANDLED(152, munlockall)
SYSCALL_HANDLED(157, prctl)
SYSCALL_HANDLED(158, arch_prctl)
SYSCALL_HANDLED(160, setrlimit)
SYSCALL_HANDLED(164, settimeofday)
@ -134,19 +133,9 @@ SYSCALL_HANDLED(238, set_mempolicy)
SYSCALL_HANDLED(239, get_mempolicy)
SYSCALL_HANDLED(247, waitid)
SYSCALL_HANDLED(256, migrate_pages)
#ifdef POSTK_DEBUG_ARCH_DEP_62 /* Absorb the difference between open and openat args. */
SYSCALL_HANDLED(257, openat)
SYSCALL_DELEGATED(258, mkdirat)
SYSCALL_DELEGATED(259, mknodat)
SYSCALL_DELEGATED(260, fchownat)
SYSCALL_DELEGATED(261, futimesat)
SYSCALL_DELEGATED(262, newfstatat)
SYSCALL_DELEGATED(263, unlinkat)
SYSCALL_DELEGATED(264, renameat)
SYSCALL_DELEGATED(265, linkat)
SYSCALL_DELEGATED(266, symlinkat)
SYSCALL_DELEGATED(267, readlinkat)
SYSCALL_DELEGATED(268, fchmodat)
SYSCALL_DELEGATED(269, faccessat)
#endif /* POSTK_DEBUG_ARCH_DEP_62 */
SYSCALL_DELEGATED(270, pselect6)
SYSCALL_DELEGATED(271, ppoll)
SYSCALL_HANDLED(273, set_robust_list)

View File

@ -49,7 +49,7 @@ struct x86_cpu_local_variables *get_x86_cpu_local_variable(int id)
((char *)locals + (LOCALS_SPAN * id));
}
void *get_x86_cpu_local_kstack(int id)
static void *get_x86_cpu_local_kstack(int id)
{
return ((char *)locals + (LOCALS_SPAN * (id + 1)));
}

View File

@ -1,4 +1,3 @@
/* memory.c COPYRIGHT FUJITSU LIMITED 2018 */
/**
* \file memory.c
* License details are found in the file LICENSE.
@ -40,6 +39,8 @@ extern char _head[], _end[];
extern unsigned long x86_kernel_phys_base;
int safe_kernel_map = 0;
/* Arch specific early allocation routine */
void *early_alloc_pages(int nr_pages)
{
@ -108,7 +109,6 @@ struct page_table {
};
static struct page_table *init_pt;
static struct page_table *boot_pt;
static int init_pt_loaded = 0;
static ihk_spinlock_t init_pt_lock;
@ -1500,10 +1500,8 @@ static int clear_range_l1(void *args0, pte_t *ptep, uint64_t base,
if (page) {
dkprintf("%s: page=%p,is_in_memobj=%d,(old & PFL1_DIRTY)=%lx,memobj=%p,args->memobj->flags=%x\n", __FUNCTION__, page, page_is_in_memobj(page), (old & PFL1_DIRTY), args->memobj, args->memobj ? args->memobj->flags : -1);
}
if (page && page_is_in_memobj(page) &&
pte_is_dirty(&old, PTL1_SIZE) && args->memobj &&
!(args->memobj->flags & (MF_ZEROFILL | MF_PRIVATE))) {
if (page && page_is_in_memobj(page) && pte_is_dirty(&old, PTL1_SIZE) &&
args->memobj && !(args->memobj->flags & MF_ZEROFILL)) {
memobj_flush_page(args->memobj, phys, PTL1_SIZE);
}
@ -1569,9 +1567,7 @@ static int clear_range_l2(void *args0, pte_t *ptep, uint64_t base,
page = phys_to_page(phys);
}
if (page && page_is_in_memobj(page) &&
pte_is_dirty(&old, PTL2_SIZE) && args->memobj &&
!(args->memobj->flags & (MF_ZEROFILL | MF_PRIVATE))) {
if (page && page_is_in_memobj(page) && pte_is_dirty(&old, PTL2_SIZE)) {
memobj_flush_page(args->memobj, phys, PTL2_SIZE);
}
@ -1652,9 +1648,7 @@ static int clear_range_l3(void *args0, pte_t *ptep, uint64_t base,
page = phys_to_page(phys);
}
if (page && page_is_in_memobj(page) &&
pte_is_dirty(&old, PTL3_SIZE) && args->memobj &&
!(args->memobj->flags & (MF_ZEROFILL | MF_PRIVATE))) {
if (page && page_is_in_memobj(page) && pte_is_dirty(&old, PTL3_SIZE)) {
memobj_flush_page(args->memobj, phys, PTL3_SIZE);
}
@ -2247,7 +2241,7 @@ out:
int ihk_mc_pt_set_range(page_table_t pt, struct process_vm *vm, void *start,
void *end, uintptr_t phys, enum ihk_mc_pt_attribute attr,
int pgshift, struct vm_range *range, int overwrite)
int pgshift, struct vm_range *range)
{
int error;
struct set_range_args args;
@ -2456,7 +2450,7 @@ static int move_one_page(void *arg0, page_table_t pt, pte_t *ptep,
attr = apte & ~PT_PHYSMASK;
error = ihk_mc_pt_set_range(pt, args->vm, (void *)dest,
(void *)(dest + pgsize), phys, attr, pgshift, args->range, 0);
(void *)(dest + pgsize), phys, attr, pgshift, args->range);
if (error) {
kprintf("move_one_page(%p,%p,%p %#lx,%p,%d):"
"set failed. %d\n",
@ -2520,11 +2514,6 @@ struct page_table *get_init_page_table(void)
return init_pt;
}
struct page_table *get_boot_page_table(void)
{
return boot_pt;
}
static unsigned long fixed_virt;
static void init_fixed_area(struct page_table *pt)
{
@ -2554,8 +2543,6 @@ static void init_normal_area(struct page_table *pt)
}
}
extern char *find_command_line(char *name);
static void init_linux_kernel_mapping(struct page_table *pt)
{
unsigned long map_start, map_end, phys;
@ -2565,7 +2552,7 @@ static void init_linux_kernel_mapping(struct page_table *pt)
/* In case of safe_kernel_map option (safe_kernel_map == 1),
* processing to prevent destruction of the memory area on Linux side
* is executed */
if (find_command_line("safe_kernel_map") == NULL) {
if (safe_kernel_map == 0) {
kprintf("Straight-map entire physical memory\n");
/* Map 2 TB for now */
@ -2701,18 +2688,10 @@ void init_page_table(void)
init_normal_area(init_pt);
init_linux_kernel_mapping(init_pt);
init_fixed_area(init_pt);
init_low_area(init_pt);
init_text_area(init_pt);
init_vsyscall_area(init_pt);
/* boot page table: needs zero mapping in order to execute the next
* instruction that jumps into regular regions
*/
boot_pt = ihk_mc_alloc_pages(1, IHK_MC_AP_CRITICAL);
memcpy(boot_pt, init_pt, sizeof(*boot_pt));
init_low_area(boot_pt);
if (memcmp(init_pt, boot_pt, sizeof(*init_pt)) == 0)
panic("init low area for boot pt did not affect toplevel entry");
load_page_table(init_pt);
init_pt_loaded = 1;
kprintf("Page table is now at 0x%lx\n", init_pt);

View File

@ -1,4 +1,3 @@
/* perfctr.c COPYRIGHT FUJITSU LIMITED 2018 */
/**
* \file perfctr.c
* License details are found in the file LICENSE.
@ -21,7 +20,7 @@
extern unsigned int *x86_march_perfmap;
extern int running_on_kvm(void);
static int ihk_mc_perfctr_fixed_init(int counter, int mode);
int ihk_mc_perfctr_fixed_init(int counter, int mode);
//#define PERFCTR_DEBUG
#ifdef PERFCTR_DEBUG
@ -318,7 +317,7 @@ int ihk_mc_perfctr_start(unsigned long counter_mask)
goto fn_exit;
}
int ihk_mc_perfctr_stop(unsigned long counter_mask, int flags)
int ihk_mc_perfctr_stop(unsigned long counter_mask)
{
int ret = 0;
unsigned long value;
@ -355,7 +354,7 @@ int ihk_mc_perfctr_stop(unsigned long counter_mask, int flags)
}
// init for fixed counter
static int ihk_mc_perfctr_fixed_init(int counter, int mode)
int ihk_mc_perfctr_fixed_init(int counter, int mode)
{
unsigned long value = 0;
unsigned int ctr_mask = 0xf;
@ -499,17 +498,3 @@ int ihk_mc_perfctr_alloc_counter(unsigned int *type, unsigned long *config, unsi
return ret;
}
int ihk_mc_perf_counter_mask_check(unsigned long counter_mask)
{
if ((counter_mask & PERF_COUNTERS_MASK) |
(counter_mask & FIXED_PERF_COUNTERS_MASK)) {
return 1;
}
return 0;
}
int ihk_mc_perf_get_num_counters(void)
{
return NUM_PERF_COUNTERS;
}

View File

@ -1,4 +1,3 @@
/* syscall.c COPYRIGHT FUJITSU LIMITED 2018 */
/**
* \file syscall.c
* License details are found in the file LICENSE.
@ -170,38 +169,6 @@ fault:
return -EFAULT;
}
SYSCALL_DECLARE(prctl)
{
struct process *proc = cpu_local_var(current)->proc;
int option = (int)ihk_mc_syscall_arg0(ctx);
unsigned long arg2 = (unsigned long)ihk_mc_syscall_arg1(ctx);
unsigned long arg3 = (unsigned long)ihk_mc_syscall_arg2(ctx);
unsigned long arg4 = (unsigned long)ihk_mc_syscall_arg3(ctx);
unsigned long arg5 = (unsigned long)ihk_mc_syscall_arg4(ctx);
int ret = 0;
switch (option) {
case PR_SET_THP_DISABLE:
if (arg3 || arg4 || arg5) {
return -EINVAL;
}
proc->thp_disable = arg2;
ret = 0;
break;
case PR_GET_THP_DISABLE:
if (arg2 || arg3 || arg4 || arg5) {
return -EINVAL;
}
ret = proc->thp_disable;
break;
default:
ret = syscall_generic_forwarding(__NR_prctl, ctx);
break;
}
return ret;
}
struct sigsp {
unsigned long flags;
void *link;
@ -1304,8 +1271,8 @@ do_kill(struct thread *thread, int pid, int tid, int sig, siginfo_t *info,
struct mcs_rwlock_node_irqsave slock;
int pgid = -pid;
int rc = -ESRCH;
int *pids = NULL;
int n = 0, nr_pids = 0;
int *pids;
int n = 0;
int sendme = 0;
if(pid == 0){
@ -1313,41 +1280,10 @@ do_kill(struct thread *thread, int pid, int tid, int sig, siginfo_t *info,
return -ESRCH;
pgid = thread->proc->pgid;
}
// Count nr of pids
pids = kmalloc(sizeof(int) * num_processors, IHK_MC_AP_NOWAIT);
if(!pids)
return -ENOMEM;
for(i = 0; i < HASH_SIZE; i++){
mcs_rwlock_reader_lock(&phash->lock[i], &slock);
list_for_each_entry(p, &phash->list[i], hash_list){
if(pgid != 1 && p->pgid != pgid)
continue;
if(thread && p->pid == thread->proc->pid){
sendme = 1;
continue;
}
++nr_pids;
}
mcs_rwlock_reader_unlock(&phash->lock[i], &slock);
}
if (nr_pids) {
pids = kmalloc(sizeof(int) * nr_pids, IHK_MC_AP_NOWAIT);
if(!pids)
return -ENOMEM;
}
else {
if (sendme) {
goto sendme;
}
return rc;
}
// Collect pids and do the kill
for(i = 0; i < HASH_SIZE; i++){
if (n == nr_pids) {
break;
}
mcs_rwlock_reader_lock(&phash->lock[i], &slock);
list_for_each_entry(p, &phash->list[i], hash_list){
if(pgid != 1 && p->pgid != pgid)
@ -1360,15 +1296,11 @@ do_kill(struct thread *thread, int pid, int tid, int sig, siginfo_t *info,
pids[n] = p->pid;
n++;
if (n == nr_pids) {
break;
}
}
mcs_rwlock_reader_unlock(&phash->lock[i], &slock);
}
for(i = 0; i < n; i++)
rc = do_kill(thread, pids[i], -1, sig, info, ptracecont);
sendme:
if(sendme)
rc = do_kill(thread, thread->proc->pid, -1, sig, info, ptracecont);
@ -1758,14 +1690,16 @@ SYSCALL_DECLARE(shmget)
dkprintf("shmget(%#lx,%#lx,%#x)\n", key, size, shmflg0);
if (shmflg & SHM_HUGETLB) {
int hugeshift = shmflg & (0x3F << SHM_HUGE_SHIFT);
if (hugeshift == 0) {
switch (shmflg & (0x3F << SHM_HUGE_SHIFT)) {
case 0:
shmflg |= SHM_HUGE_2MB; /* default hugepage size */
} else if (hugeshift == SHM_HUGE_2MB ||
hugeshift == SHM_HUGE_1GB) {
/*nop*/
} else {
break;
case SHM_HUGE_2MB:
case SHM_HUGE_1GB:
break;
default:
error = -EINVAL;
goto out;
}
@ -2048,7 +1982,7 @@ int arch_map_vdso(struct process_vm *vm)
s = vm->vdso_addr + (i * PAGE_SIZE);
e = s + PAGE_SIZE;
error = ihk_mc_pt_set_range(pt, vm, s, e,
vdso.vdso_physlist[i], attr, 0, range, 0);
vdso.vdso_physlist[i], attr, 0, range);
if (error) {
ekprintf("ihk_mc_pt_set_range failed. %d\n", error);
goto out;
@ -2080,7 +2014,7 @@ int arch_map_vdso(struct process_vm *vm)
e = s + PAGE_SIZE;
attr = PTATTR_ACTIVE | PTATTR_USER | PTATTR_NO_EXECUTE;
error = ihk_mc_pt_set_range(pt, vm, s, e,
vdso.vvar_phys, attr, 0, range, 0);
vdso.vvar_phys, attr, 0, range);
if (error) {
ekprintf("ihk_mc_pt_set_range failed. %d\n", error);
goto out;
@ -2091,7 +2025,7 @@ int arch_map_vdso(struct process_vm *vm)
e = s + PAGE_SIZE;
attr = PTATTR_ACTIVE | PTATTR_USER | PTATTR_NO_EXECUTE | PTATTR_UNCACHABLE;
error = ihk_mc_pt_set_range(pt, vm, s, e,
vdso.hpet_phys, attr, 0, range, 0);
vdso.hpet_phys, attr, 0, range);
if (error) {
ekprintf("ihk_mc_pt_set_range failed. %d\n", error);
goto out;
@ -2102,7 +2036,7 @@ int arch_map_vdso(struct process_vm *vm)
e = s + PAGE_SIZE;
attr = PTATTR_ACTIVE | PTATTR_USER | PTATTR_NO_EXECUTE;
error = ihk_mc_pt_set_range(pt, vm, s, e,
vdso.pvti_phys, attr, 0, range, 0);
vdso.pvti_phys, attr, 0, range);
if (error) {
ekprintf("ihk_mc_pt_set_range failed. %d\n", error);
goto out;

View File

@ -1,4 +1,3 @@
# mcoverlay-create-smp-x86.sh.in COPYRIGHT FUJITSU LIMITED 2018
# Overlay /proc, /sys with McKernel specific contents
#

View File

@ -1,5 +1,4 @@
#!/bin/bash
# mcreboot-attached-mic.sh.in COPYRIGHT FUJITSU LIMITED 2018
# \file arch/x86/tools/mcreboot-attached-mic.sh.in
# License details are found in the file LICENSE.

View File

@ -1,5 +1,4 @@
#!/bin/bash -x
# mcreboot-builtin-x86.sh.in COPYRIGHT FUJITSU LIMITED 2018
# \file arch/x86/tools/mcreboot-builtin-x86.sh.in
# License details are found in the file LICENSE.

View File

@ -1,5 +1,4 @@
#!/bin/bash
# mcreboot-smp-x86.sh.in COPYRIGHT FUJITSU LIMITED 2018
# IHK SMP-x86 example boot script.
# author: Balazs Gerofi <bgerofi@riken.jp>
@ -18,8 +17,8 @@ prefix="@prefix@"
BINDIR="${prefix}/bin"
SBINDIR="${prefix}/sbin"
ETCDIR=@ETCDIR@
KMODDIR="@KMODDIR@"
KERNDIR="@MCKERNELDIR@"
KMODDIR="${prefix}/kmod"
KERNDIR="${prefix}/@TARGET@/kernel"
ENABLE_MCOVERLAYFS="@ENABLE_MCOVERLAYFS@"
MCK_BUILDID=@BUILDID@
@ -32,12 +31,6 @@ if [ "${BASH_VERSINFO[0]}" -lt 4 ]; then
exit 1
fi
# Check SELinux
if which getenforce 1>/dev/null 2>/dev/null && [ "`getenforce | tr '[:upper:]' '[:lower:]'`" == "enforcing" ]; then
echo "error: SELinux must not be enabled when running McKernel (update /etc/selinux/config or see setenforce)"
exit 1
fi
redirect_kmsg=0
mon_interval="-1"
DUMP_LEVEL=24
@ -56,9 +49,8 @@ safe_kernel_map=""
umask_old=`umask`
idle_halt=""
allow_oversubscribe=""
time_sharing="time_sharing"
while getopts stk:c:m:o:f:r:q:i:d:e:hOT: OPT
while getopts stk:c:m:o:f:r:q:i:d:e:hO OPT
do
case ${OPT} in
f) facility=${OPTARG}
@ -89,14 +81,6 @@ do
;;
O) allow_oversubscribe="allow_oversubscribe"
;;
T)
case ${OPTARG} in
1) time_sharing="time_sharing"
;;
0) time_sharing=""
;;
esac
;;
\?) exit 1
;;
esac
@ -138,32 +122,32 @@ error_exit() {
fi
;&
mcos_sys_mounted)
if [ "$ENABLE_MCOVERLAYFS" == "ON" ]; then
if [ "$enable_mcoverlay" == "yes" ]; then
umount /tmp/mcos/mcos0_sys
fi
;&
mcos_proc_mounted)
if [ "$ENABLE_MCOVERLAYFS" == "ON" ]; then
if [ "$enable_mcoverlay" == "yes" ]; then
umount /tmp/mcos/mcos0_proc
fi
;&
mcoverlayfs_loaded)
if [ "$ENABLE_MCOVERLAYFS" == "ON" ]; then
if [ "$enable_mcoverlay" == "yes" ]; then
rmmod mcoverlay 2>/dev/null
fi
;&
linux_proc_bind_mounted)
if [ "$ENABLE_MCOVERLAYFS" == "ON" ]; then
if [ "$enable_mcoverlay" == "yes" ]; then
umount /tmp/mcos/linux_proc
fi
;&
tmp_mcos_mounted)
if [ "$ENABLE_MCOVERLAYFS" == "ON" ]; then
if [ "$enable_mcoverlay" == "yes" ]; then
umount /tmp/mcos
fi
;&
tmp_mcos_created)
if [ "$ENABLE_MCOVERLAYFS" == "ON" ]; then
if [ "$enable_mcoverlay" == "yes" ]; then
rm -rf /tmp/mcos
fi
;&
@ -240,6 +224,26 @@ if [ "${release}" == "${rhel_release}" ]; then
rhel_release="";
fi
enable_mcoverlay="no"
if [ "${ENABLE_MCOVERLAYFS}" == "yes" ]; then
if [ "${rhel_release}" == "" ]; then
if [ ${linux_version_code} -ge 262144 -a ${linux_version_code} -lt 262400 ]; then
enable_mcoverlay="yes"
fi
if [ ${linux_version_code} -ge 263680 -a ${linux_version_code} -lt 263936 ]; then
enable_mcoverlay="yes"
fi
else
if [ ${linux_version_code} -eq 199168 -a ${rhel_release} -ge 327 -a ${rhel_release} -le 862 ]; then
enable_mcoverlay="yes"
fi
if [ ${linux_version_code} -ge 262144 -a ${linux_version_code} -lt 262400 ]; then
enable_mcoverlay="yes"
fi
fi
fi
# Figure out CPUs if not requested by user
if [ "$cpus" == "" ]; then
# Get the number of CPUs on NUMA node 0
@ -255,7 +259,7 @@ if [ "$cpus" == "" ]; then
fi
# Remove mcoverlay if loaded
if [ "$ENABLE_MCOVERLAYFS" == "ON" ]; then
if [ "$enable_mcoverlay" == "yes" ]; then
${SBINDIR}/mcoverlay-destroy.sh
ret=$?
if [ $ret -ne 0 ]; then
@ -392,7 +396,7 @@ fi
IHK_BUILDID=`${SBINDIR}/ihkconfig 0 get buildid`
if [ "${IHK_BUILDID}" != "${MCK_BUILDID}" ]; then
echo "IHK build-id (${IHK_BUILDID}) didn't match McKernel build-id (${MCK_BUILDID})." >&2
error_exit "mcctrl_loaded"
exit 1
fi
# Destroy all LWK instances
@ -445,7 +449,7 @@ if ! ${SBINDIR}/ihkosctl 0 load ${KERNDIR}/mckernel.img; then
fi
# Set kernel arguments
if ! ${SBINDIR}/ihkosctl 0 kargs "hidos $turbo $safe_kernel_map $idle_halt dump_level=${DUMP_LEVEL} $extra_kopts $allow_oversubscribe $time_sharing"; then
if ! ${SBINDIR}/ihkosctl 0 kargs "hidos $turbo $safe_kernel_map $idle_halt dump_level=${DUMP_LEVEL} $extra_kopts $allow_oversubscribe"; then
echo "error: setting kernel arguments" >&2
error_exit "os_created"
fi
@ -462,7 +466,7 @@ if ! chown ${chown_option} /dev/mcd* /dev/mcos*; then
fi
# Overlay /proc, /sys with McKernel specific contents
if [ "$ENABLE_MCOVERLAYFS" == "ON" ]; then
if [ "$enable_mcoverlay" == "yes" ]; then
${SBINDIR}/mcoverlay-create.sh
ret=$?
if [ $ret -ne 0 ]; then

View File

@ -1,5 +1,4 @@
#!/bin/bash
# mcstop+release-smp-x86.sh.in COPYRIGHT FUJITSU LIMITED 2018
# IHK SMP-x86 example McKernel unload script.
# author: Balazs Gerofi <bgerofi@riken.jp>
@ -100,6 +99,15 @@ if grep mcctrl /proc/modules &>/dev/null; then
fi
fi
# Remove mcoverlay if loaded
${SBINDIR}/mcoverlay-destroy.sh
ret=$?
if [ $ret -ne 0 ]; then
echo "error: mcoverlay-destroy.sh" >&2
exit $ret
fi
# Remove SMP module
if grep ihk_smp_@ARCH@ /proc/modules &>/dev/null; then
if ! rmmod ihk_smp_@ARCH@ 2>/dev/null; then

View File

@ -1,46 +0,0 @@
Cross compilation:
------------------
The standard way of cross compiling with cmake is to give cmake a "toolchain
file" that describes the compiler prefix and where it can find libraries for
the target system, we provide an example in cmake/cross-aarch64.cmake.
This obviously requires installing a toolchain and a rootfs with target
libraries to link against.
In addition, mckernel borrows the Kbuild system from linux kernel, which also
makes the assumption that you can run generated executables (Kbuild uses various
scripts around module building and does not make the distinction between build
target and host target); you can get this working by setting up qemu-user on your
machine which the kernel will transparently use through binfmt magic when trying
to execute other arch binaries.
# yum install gcc-aarch64-linux-gnu qemu-user
(dnf brings in --forcearch, we use this to setup the sysroot ; there are other
ways of building one. It is available on el7 in extras.)
# yum install dnf dnf-plugins-core
(install these separately because most scripts cannot be run, and dependency hell means we need them first)
# dnf download --releasever=7 --forcearch=aarch64 filesystem centos-release
# rpm --root /usr/aarch64-linux-gnu/sys-root --ignorearch --nodeps -ivh filesystem-*.rpm
# rpm --root /usr/aarch64-linux-gnu/sys-root --ignorearch --nodeps -ivh centos-release-*.rpm
# rpm --root /usr/aarch64-linux-gnu/sys-root --import /usr/aarch64-linux-gnu/sys-root/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
# rpm --root /usr/aarch64-linux-gnu/sys-root --import /usr/aarch64-linux-gnu/sys-root/etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7-aarch64
# dnf install --releasever=7 --forcearch=aarch64 --installroot=/usr/aarch64-linux-gnu/sys-root/ --setopt=tsflags=noscripts glibc-devel kernel-devel numactl-devel systemd-devel binutils-devel
(el7 lacks a binfmt for aarch64... fix this)
# echo ':qemu-aarch64:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/bin/qemu-aarch64:' > /etc/binfmt.d/qemu-aarch64-dynamic.conf
# systemctl restart systemd-binfmt
(optional) test your setup
# gcc -xc - <<<'#include <stdio.h>'$'\n''int main() { printf("ok\n"); return 0; }'
# export QEMU_LD_PREFIX=/usr/aarch64-linux-gnu/sys-root
# file a.out
a.out: ELF 64-bit LSB executable, ARM aarch64, version 1 (SYSV), dynamically linked (uses shared libs), for GNU/Linux 3.7.0, BuildID[sha1]=5ff445d3353cad2dae0a22550fe4cc572287dd90, not stripped
# ./a.out
ok
finally, build mckernel!
# mkdir build; cd build
# export QEMU_LD_PREFIX=/usr/aarch64-linux-gnu/sys-root
# cmake -DCMAKE_INSTALL_PREFIX=/tmp/install-aarch64 -DCMAKE_TOOLCHAIN_FILE=../cmake/cross-aarch64.cmake -DUNAME_R=4.14.0-115.2.2.el7a.aarch64 -DKERNEL_DIR=/usr/aarch64-linux-gnu/sys-root/usr/src/kernels/4.14.0-115.2.2.el7a.aarch64/ -DBUILD_TARGET=smp-arm64 ..
# make -j

View File

@ -1,10 +0,0 @@
SET(CMAKE_SYSTEM_NAME Linux)
SET(CMAKE_C_COMPILER /usr/bin/aarch64-linux-gnu-gcc)
SET(CMAKE_CXX_COMPILER /usr/bin/aarch64-linux-gnu-g++)
SET(CMAKE_FIND_ROOT_PATH /usr/aarch64-linux-gnu/sys-root)
SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

View File

@ -1,100 +0,0 @@
# Interface to kbuild
#
# Generate Kbuild file as appropriate and call make to the kernel build system
# Original goal was to be simple, but correctness is difficult...
set(UNAME_R ${CMAKE_SYSTEM_VERSION} CACHE STRING "Kernel version to build against")
set(KERNEL_DIR "/lib/modules/${UNAME_R}/build" CACHE STRING "kernel build directory")
set(KBUILD_C_FLAGS "" CACHE STRING "Compiler flags to give to Kbuild.")
set(KBUILD_MAKE_FLAGS "" CACHE STRING "Extra make arguments for Kbuild.")
mark_as_advanced(
KBUILD_C_FLAGS
KBUILD_MAKE_FLAGS
)
function(kmod MODULE_NAME)
cmake_parse_arguments(KMOD "" "INSTALL_DEST" "C_FLAGS;SOURCES;EXTRA_SYMBOLS;DEPENDS" ${ARGN})
add_custom_target(${MODULE_NAME}_ko ALL
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/${MODULE_NAME}.ko"
"${CMAKE_CURRENT_BINARY_DIR}/Module.symvers")
string(REGEX REPLACE "\\.c(;|$)" ".o\\1" KMOD_OBJECTS "${KMOD_SOURCES}")
string(REPLACE ";" " " OBJECTS "${KMOD_OBJECTS}")
string(REPLACE ";" " " C_FLAGS "${KMOD_C_FLAGS}")
string(REPLACE ";" " " EXTRA_SYMBOLS "${KMOD_EXTRA_SYMBOLS}")
if(ENABLE_WERROR)
set(ccflags "${KBUILD_C_FLAGS} ${C_FLAGS} -Werror")
else(ENABLE_WERROR)
set(ccflags "${KBUILD_C_FLAGS} ${C_FLAGS}")
endif(ENABLE_WERROR)
configure_file(${CMAKE_SOURCE_DIR}/cmake/modules/Kbuild.in
${CMAKE_CURRENT_BINARY_DIR}/Kbuild)
if (${CMAKE_GENERATOR} STREQUAL Ninja)
set(MAKE "make")
list(APPEND KBUILD_MAKE_FLAGS "-j")
else ()
set(MAKE "$(MAKE)")
endif ()
if (NOT "${ARCH}" STREQUAL "${CMAKE_HOST_SYSTEM_PROCESSOR}")
string(REGEX REPLACE "ld$" "" CROSS_COMPILE "${CMAKE_LINKER}")
list(APPEND KBUILD_MAKE_FLAGS "ARCH=${ARCH};CROSS_COMPILE=${CROSS_COMPILE}")
endif()
string(REGEX REPLACE "\\.c(;|$)" ".o.cmd\\1" KMOD_O_CMD "${KMOD_SOURCES}")
string(REGEX REPLACE "[^/;]+(;|$)" ".\\0" KMOD_O_CMD "${KMOD_O_CMD}")
# This custom command has two uses:
# - first is to list kbuild output files, so make clean does something
# (cmake does not let us add a custom command to make clean)
# - this alone could have been added to the other command, but cmake insists
# on messing with timestamps with touch_nocreate after the command runs,
# so it would incorrectly make intermediary outputs newer than the .ko
# and force kbuild to relink needlessly
add_custom_command(
OUTPUT
old_timestamp
${KMOD_OBJECTS}
${KMOD_O_CMD}
"${MODULE_NAME}.o"
".${MODULE_NAME}.o.cmd"
"${MODULE_NAME}.mod.c"
"${MODULE_NAME}.mod.o"
".${MODULE_NAME}.mod.o.cmd"
".${MODULE_NAME}.ko.cmd"
".tmp_versions/${MODULE_NAME}.mod"
".tmp_versions"
"modules.order"
COMMAND touch old_timestamp
)
# This custom command forces cmake to rebuild the module, so kbuild's dependencies
# (including header files modifications) kick in everytime.
# Ideally, should later be replaced by something parsing the .xxx.cmd files to have
# the native build system do these checks, if possible at all...
add_custom_command(OUTPUT kmod_always_rebuild COMMAND touch kmod_always_rebuild)
add_custom_command(
OUTPUT "${MODULE_NAME}.ko"
"Module.symvers"
COMMAND ${MAKE} ${KBUILD_MAKE_FLAGS} -C ${KERNEL_DIR}
M=${CMAKE_CURRENT_BINARY_DIR} modules
COMMAND rm -f kmod_always_rebuild
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/Kbuild"
${KMOD_DEPENDS}
kmod_always_rebuild
old_timestamp
COMMENT "Building kmod ${MODULE_NAME}"
)
if (KMOD_INSTALL_DEST)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${MODULE_NAME}.ko"
DESTINATION "${KMOD_INSTALL_DEST}")
endif (KMOD_INSTALL_DEST)
message("Defined module ${MODULE_NAME}")
endfunction(kmod)

View File

@ -1,7 +0,0 @@
ccflags-y := @ccflags@
src := @CMAKE_CURRENT_SOURCE_DIR@
KBUILD_EXTRA_SYMBOLS := @EXTRA_SYMBOLS@
obj-m := @MODULE_NAME@.o
@MODULE_NAME@-y := @OBJECTS@

View File

@ -1,43 +0,0 @@
# Lookup symbol addresses from Ksymbol file
set(SYSTEM_MAP "${KERNEL_DIR}/System.map" CACHE STRING "System map to look for symbols")
set(VMLINUX "${KERNEL_DIR}/vmlinux" CACHE STRING "kernel object file")
function(ksym SYMBOL)
cmake_parse_arguments(KSYM "" "PREFIX;SOURCE_FILE;SUFFIX" "" ${ARGN})
execute_process(COMMAND awk "/ ${SYMBOL}$/ { print $1 }" ${SYSTEM_MAP}
OUTPUT_VARIABLE ADDRESS_CANDIDATES OUTPUT_STRIP_TRAILING_WHITESPACE)
if (NOT ADDRESS_CANDIDATES)
return()
endif()
# listify and get first element
string(REPLACE "\n" ";" ADDRESS_CANDIDATES "${ADDRESS_CANDIDATES}")
list(GET ADDRESS_CANDIDATES 0 ADDRESS)
if (SOURCE_FILE)
foreach(ADDRESS IN LISTS ADDRESS_CANDIDATES)
execute_process(COMMAND addr2line -e ${VMLINUX} ${ADDRESS}
OUTPUT_VARIABLE LINE OUTPUT_STRIP_TRAILING_WHITESPACE)
if(LINE MATCHES ".*${SOURCE_FILE}:.*")
set(FOUND ADDRESS)
break()
endif()
endforeach(ADDRESS)
if(NOT FOUND)
return()
endif()
# ?! why only if source_file?...
execute_process(COMMAND "awk '/ __ksymtab_${SYMBOL}$/ { print $1 }'"
OUTPUT_VARIABLE SYMBOL_EXPORTED OUTPUT_STRIP_TRAILING_WHITESPACE)
if (SYMBOL_EXPORTED)
set(ADDRESS 0)
endif(SYMBOL_EXPORTED)
endif(SOURCE_FILE)
set(${KSYM_PREFIX}KSYM_${SYMBOL}${KSYM_SUFFIX} "0x${ADDRESS}" CACHE INTERNAL "symbol")
endfunction(ksym)

View File

@ -1,45 +1,91 @@
/* config.h.in. Generated from configure.ac by autoheader. */
/* IHK build-id to confirm IHK and McKernel built at the same time are used */
#define BUILDID "${BUILDID}"
/* Path of install directory for binary */
#undef BINDIR
/* version number */
#define MCKERNEL_VERSION "${MCKERNEL_VERSION}"
/* IHK build-id to confirm IHK and McKernel built at the same time are used */
#undef BUILDID
/* whether mcoverlayfs is enabled */
#cmakedefine ENABLE_MCOVERLAYFS 1
#undef ENABLE_MCOVERLAYFS
/* whether memdump feature is enabled */
#cmakedefine ENABLE_MEMDUMP 1
#undef ENABLE_MEMDUMP
/* whether perf is enabled */
#cmakedefine ENABLE_PERF 1
#undef ENABLE_PERF
/* whether qlmpi is enabled */
#cmakedefine ENABLE_QLMPI 1
#undef ENABLE_QLMPI
/* whether rusage is enabled */
#cmakedefine ENABLE_RUSAGE 1
#undef ENABLE_RUSAGE
/* whether UTI is enabled */
#cmakedefine ENABLE_UTI 1
/* Define to 1 if you have the <inttypes.h> header file. */
#undef HAVE_INTTYPES_H
/* whether undefined behaviour sanitizer is enabled */
#cmakedefine ENABLE_UBSAN 1
/* Define to 1 if you have the `bfd' library (-lbfd). */
#undef HAVE_LIBBFD
/* Define to 1 if you have the `iberty' library (-liberty). */
#undef HAVE_LIBIBERTY
/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H
/* Define to 1 if you have the <stdlib.h> header file. */
#undef HAVE_STDLIB_H
/* Define to 1 if you have the <strings.h> header file. */
#undef HAVE_STRINGS_H
/* Define to 1 if you have the <string.h> header file. */
#undef HAVE_STRING_H
/* Define to 1 if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H
/* Define to 1 if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H
/* Define to 1 if you have the <unistd.h> header file. */
#undef HAVE_UNISTD_H
/* McKernel specific headers */
#undef MCKERNEL_INCDIR
/* McKernel specific libraries */
#undef MCKERNEL_LIBDIR
/* Define to the address where bug reports for this package should be sent. */
#undef PACKAGE_BUGREPORT
/* Define to the full name of this package. */
#undef PACKAGE_NAME
/* Define to the full name and version of this package. */
#undef PACKAGE_STRING
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the home page for this package. */
#undef PACKAGE_URL
/* Define to the version of this package. */
#undef PACKAGE_VERSION
/* Path of bind-mount source directory */
#cmakedefine ROOTFSDIR "${ROOTFSDIR}"
/* Path of install directory for libraries */
#cmakedefine MCKERNEL_LIBDIR "${MCKERNEL_LIBDIR}"
/* Path of install directory for binary */
#cmakedefine BINDIR "${BINDIR}"
#undef ROOTFSDIR
/* Path of install directory for system binary */
#cmakedefine SBINDIR "${SBINDIR}"
#undef SBINDIR
/* Compatibility shim for non-RHEL kernels: RHEL kernel headers provide
 * RHEL_RELEASE_VERSION(major, minor); define the same (major << 8) + minor
 * encoding here so version comparisons compile against vanilla kernels too. */
#ifndef RHEL_RELEASE_VERSION
#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
#endif
/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
/* whether or not syscall_intercept library is linked */
#undef WITH_SYSCALL_INTERCEPT

Some files were not shown because too many files have changed in this diff Show More