initial upload
.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
.DS_Store
CMakeLists.txt (new file, 292 lines)
@@ -0,0 +1,292 @@
cmake_minimum_required(VERSION 3.10)

# set the project name
set(CMAKE_PROJECT_NAME "TOMOATT")
project(${CMAKE_PROJECT_NAME} VERSION 1.1.2 LANGUAGES C CXX)

# set install directory
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

# check debug or release
if (CMAKE_BUILD_TYPE STREQUAL "Debug")
    message(STATUS "Build type: Debug")
else()
    message(STATUS "Build type: Release")
endif()

# check compiler type
message(STATUS "Compiler type: ${CMAKE_CXX_COMPILER_ID}")
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
    if (CMAKE_BUILD_TYPE STREQUAL "Debug")
        set(CXX_ADDITIONAL_FLAGS "-Wall -pedantic -g -O0")
    else()
        set(CXX_ADDITIONAL_FLAGS "-Wall -pedantic -O3 -funroll-loops -ffast-math -ftree-vectorize")
    endif()
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
    if (CMAKE_BUILD_TYPE STREQUAL "Debug")
        set(CXX_ADDITIONAL_FLAGS "-Wall -pedantic -g -O0 -lm -lstdc++fs")
    else()
        set(CXX_ADDITIONAL_FLAGS "-Wall -pedantic -lm -O3 -funroll-loops -ffast-math -ftree-vectorize -lstdc++fs")
    endif()
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
    if (CMAKE_BUILD_TYPE STREQUAL "Debug")
        set(CXX_ADDITIONAL_FLAGS "-diag-disable=10441,2012,2015,2017,2047,2304,2305,3868,10193,10315,11074,11076 -Wall -pedantic -g -O0 -lm -lstdc++fs")
    else()
        set(CXX_ADDITIONAL_FLAGS "-diag-disable=10441,2012,2015,2017,2047,2304,2305,3868,10193,10315,11074,11076 -Wall -pedantic -O3 -funroll-loops -ffast-math -lm -ftree-vectorize -lstdc++fs")
    endif()
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Fujitsu")
    MESSAGE(FATAL_ERROR "The Fujitsu trad-mode compiler is not supported. Please use clang mode.")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "FujitsuClang")
    MESSAGE("Compiler type: FujitsuClang")
    if (CMAKE_BUILD_TYPE STREQUAL "Debug")
        set(CXX_ADDITIONAL_FLAGS "-Nclang -g -O0 -std=c++17 -mcpu=a64fx+sve -march=armv8-a+sve")
    else()
        set(CXX_ADDITIONAL_FLAGS "-Nclang -Ofast -std=c++17 -mcpu=a64fx+sve -march=armv8-a+sve")
    endif()
else()
    MESSAGE(FATAL_ERROR "Compiler type: Unknown")
endif()

# Default to C++17
if(NOT CMAKE_CXX_STANDARD)
    set(CMAKE_CXX_STANDARD 17)
endif()

set(BUILD_TESTING OFF)
option(FORCE_DOWNLOAD_EXTERNAL_LIBS "Force download and use external libraries" OFF)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX_ADDITIONAL_FLAGS}")

# find installed MPI
message(STATUS "Running CMake FindMPI.cmake...")
find_package(MPI)

message(STATUS "MPI_FOUND: ${MPI_FOUND}")
message(STATUS "MPI_VERSION: ${MPI_VERSION}")

# find OpenMP ## WE DO NOT USE OPENMP BUT KEEP THIS FOR FUTURE USE
###find_package(OpenMP)
###if(OPENMP_FOUND)
###    message(STATUS "OpenMP found")
###    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
###    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
###    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
###    add_definitions(-DUSE_OMP)
###endif()

# find parallel HDF5  TODO: check whether parallel I/O is enabled or not
message(STATUS "Running CMake FindHDF5.cmake...")
# prefer parallel HDF5 by default
set(HDF5_PREFER_PARALLEL TRUE)
find_package(HDF5)
if(HDF5_FOUND)
    message(STATUS "HDF5_FOUND: ${HDF5_FOUND}")
    add_definitions(-DUSE_HDF5)
    # check if parallel HDF5 is available
    if(HDF5_IS_PARALLEL)
        message(STATUS "HDF5 parallel is available.")
    else()
        message(FATAL_ERROR "TomoATT requires HDF5 compiled with the parallel I/O option.")
    endif()
endif()

# use collective I/O for HDF5: should be faster than independent I/O,
# but there will be a memory overhead and it may not work on some systems.
# If you have a problem, please comment out this line.
add_definitions(-DUSE_HDF5_IO_COLLECTIVE)

# precision setting (uncomment for single precision; the default is double precision)
#add_definitions(-DSINGLE_PRECISION)

# use SIMD (SSE/AVX/AVX2/AVX512) for vectorization, which is faster than the default but uses a little more memory.
if (USE_SIMD)
    message(STATUS "TomoATT is compiled with SIMD.")
    add_definitions(-DUSE_SIMD)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native -mfma")
endif()

# find the CUDA package if USE_CUDA is defined
if(USE_CUDA)
    message(STATUS "Running CMake FindCUDA.cmake...")
    enable_language(CUDA)
    find_package(CUDA)
else()
    message(STATUS "TomoATT is compiled without CUDA, because -DUSE_CUDA=True is not defined.")
endif()
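# Usage sketch (illustration only; assumes an out-of-source build directory):
# the USE_SIMD and USE_CUDA switches above are plain configure-time options,
# so a typical configuration could look like
#   cmake .. -DCMAKE_BUILD_TYPE=Release -DUSE_SIMD=True -DUSE_CUDA=True
# while omitting both options gives the default CPU-only, non-vectorized build.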

if(CUDA_FOUND) # TODO : add HIP here in the future
    message(STATUS "CUDA_FOUND: ${CUDA_FOUND}")
    add_definitions(-DUSE_CUDA)
    set(CUDA_LIBRARY_NAME "TOMOATT_CUDA")

    # list of source and header files for cuda
    file(GLOB SOURCES_CUDA "cuda/*.cu")
    file(GLOB HEADERS_CUDA "cuda/*.cuh")

    # cuda flags
    #
    # for production
    set(CMAKE_CUDA_FLAGS "-fPIC -O3 -use_fast_math -extra-device-vectorization -gencode arch=compute_61,code=sm_61")
    #
    # for debugging
    #set(CMAKE_CUDA_FLAGS "-fPIC -lineinfo -g -G -O0 -gencode arch=compute_61,code=sm_61")
    set(CMAKE_CUDA_STANDARD "11")
    message(STATUS "TomoATT will be compiled with CUDA.")
else()
    message(STATUS "TomoATT will be compiled without CUDA, because CUDA was not found or -DUSE_CUDA=True was not specified.")
endif()

# synchronize the adjacent ghost layers for each direction of the sweep,
# which is more frequent than in the referenced paper but necessary
add_definitions(-DFREQ_SYNC_GHOST)

# find BLAS # WE DO NOT USE BLAS BUT KEEP THIS FOR FUTURE USE
#find_package(BLAS)
#if(BLAS_FOUND)
#    message(STATUS "BLAS_FOUND: ${BLAS_FOUND} at ${BLAS_LIBRARIES}, ${BLAS_INCLUDE_DIRS}")
#    add_definitions(-DUSE_BLAS)
#    find_path(BLAS_INCLUDE_DIRS cblas.h
#        /usr/include
#        /usr/local/include
#        /usr/local/include/openblas)
#endif()

# submodules
# yaml parser
find_package(yaml-cpp 0.8 QUIET)
if (yaml-cpp_FOUND AND NOT ${FORCE_DOWNLOAD_EXTERNAL_LIBS})
    message(STATUS "yaml-cpp found")
    message(STATUS "YAML_CPP_INCLUDE_DIR: ${YAML_CPP_INCLUDE_DIR}")
    message(STATUS "YAML_CPP_LIBRARIES: ${YAML_CPP_LIBRARIES}")
else()
    message(STATUS "yaml-cpp not found. Using external_libs/yaml-cpp ...")
    add_subdirectory(external_libs)
    set(YAML_CPP_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/external_libs/yaml-cpp/include)
    set(YAML_CPP_LIBRARIES yaml-cpp)
endif()

# add include directories
include_directories(include cuda)

execute_process(
    COMMAND git rev-parse --short HEAD
    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
    OUTPUT_VARIABLE GIT_COMMIT_HASH
    OUTPUT_STRIP_TRAILING_WHITESPACE
)

configure_file(
    ${PROJECT_SOURCE_DIR}/include/version.h.in
    ${PROJECT_SOURCE_DIR}/include/version.h
)
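# Illustration (hypothetical template content; the actual version.h.in is not
# shown in this commit): the configure_file() call above would expand a
# placeholder such as
#   #define GIT_COMMIT_HASH "@GIT_COMMIT_HASH@"
# into include/version.h, using the short hash captured by execute_process().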

# list of source files
file(GLOB SOURCES "src/*.cpp")
if(CUDA_FOUND)
    file(GLOB HEADERS "include/*.h" "cuda/*.cuh")
else()
    file(GLOB HEADERS "include/*.h")
endif()
file(GLOB SOURCES_EXT_XML "external_libs/tinyxml2/*.cpp")

# compile cuda code
if (CUDA_FOUND)
    include_directories(${CUDA_INCLUDE_DIRS})
    add_library(${CUDA_LIBRARY_NAME} STATIC ${SOURCES_CUDA} ${HEADERS_CUDA})
    target_include_directories(${CUDA_LIBRARY_NAME} PUBLIC ${YAML_CPP_INCLUDE_DIR})
    target_include_directories(${CUDA_LIBRARY_NAME} PUBLIC ${HDF5_INCLUDE_DIRS})
    target_include_directories(${CUDA_LIBRARY_NAME} PUBLIC ${PROJECT_SOURCE_DIR}/external_libs/tinyxml2)
endif()

#
# compile the executables
# every file named *.cxx is compiled into its own executable
#
#file( GLOB APP_SOURCES src/*.cxx )

# add them one by one
set(APP_SOURCES
    src/TOMOATT.cxx
    #src/TOMOATT_solver_only.cxx
    #src/TOMOATT_2d_precalc.cxx
    #src/SrcRecWeight.cxx
)

# if BUILD_TESTING is defined, build APP_TEST and append it to the APP_SOURCES list
if(BUILD_TESTING)
    # use all the cxx files in the tests/ directory
    file( GLOB APP_TEST tests/*.cxx)
    # or specify the test files one by one
    #set(APP_TEST
    #    tests/read_write_srcrec.cxx
    #)
    list(APPEND APP_SOURCES ${APP_TEST})
endif()

foreach( execsourcefile ${APP_SOURCES} )
    # get the app name from the file name
    get_filename_component(EXEC_NAME ${execsourcefile} NAME_WE)

    # add the executable
    add_executable(${EXEC_NAME} ${execsourcefile} ${SOURCES} ${HEADERS} ${SOURCES_EXT_XML})

    # set include paths
    target_include_directories(${EXEC_NAME} PRIVATE
        ${PROJECT_SOURCE_DIR}/include
        ${PROJECT_SOURCE_DIR}/cuda
        ${PROJECT_SOURCE_DIR}/external_libs/tinyxml2)

    # link MPI
    target_link_libraries(${EXEC_NAME} PUBLIC MPI::MPI_CXX)

    # link yaml-cpp
    target_link_libraries(${EXEC_NAME} PUBLIC ${YAML_CPP_LIBRARIES})
    target_include_directories(${EXEC_NAME} PUBLIC ${YAML_CPP_INCLUDE_DIR})

    # link HDF5
    if(HDF5_FOUND)
        target_link_libraries(${EXEC_NAME} PUBLIC ${HDF5_LIBRARIES})
        target_include_directories(${EXEC_NAME} PUBLIC ${HDF5_INCLUDE_DIRS})
    endif()

    # link BLAS
    if(BLAS_FOUND)
        target_link_libraries(${EXEC_NAME} PUBLIC ${BLAS_LIBRARIES})
        target_include_directories(${EXEC_NAME} PUBLIC ${BLAS_INCLUDE_DIRS})
    endif()

    # link CUDA
    if (CUDA_FOUND)
        #set_target_properties(${CUDA_LIBRARY_NAME} PROPERTIES CUDA_ARCHITECTURES "35;50;72")
        set_target_properties(${CUDA_LIBRARY_NAME} PROPERTIES CUDA_ARCHITECTURES "61")
        set_property(TARGET ${CUDA_LIBRARY_NAME} PROPERTY CUDA_ARCHITECTURES 61)

        target_link_libraries(${EXEC_NAME} PRIVATE ${CUDA_LIBRARY_NAME})
        target_link_libraries(${CUDA_LIBRARY_NAME} PUBLIC MPI::MPI_CXX)
        target_link_libraries(${CUDA_LIBRARY_NAME} PUBLIC yaml-cpp)
        target_link_libraries(${CUDA_LIBRARY_NAME} PUBLIC ${HDF5_LIBRARIES})
    endif()

endforeach( execsourcefile ${APP_SOURCES} )

# install
install(TARGETS TOMOATT DESTINATION bin)

# test

# Only configure tests when this is the top-level project;
# users of the library usually should not execute its tests
# as part of their own build.
if (${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR} AND BUILD_TESTING)
    include(CTest)

    # loop over APP_TEST
    foreach( execsourcefile ${APP_TEST} )
        add_test(NAME ${execsourcefile} COMMAND ${EXEC_NAME} ${execsourcefile})
    endforeach( execsourcefile ${APP_TEST} )
endif ()

enable_testing()
LICENSE (new file, 674 lines)
@@ -0,0 +1,674 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||
README.md (new file, 102 lines)
@@ -0,0 +1,102 @@
# TomoATT

Badges: [License](LICENSE) · [CI](https://github.com/mnagaso/TomoATT/actions/workflows/CI.yml) · [conda-forge](https://anaconda.org/conda-forge/tomoatt)

TomoATT is a library that implements eikonal-equation-based adjoint-state traveltime tomography for very large-scale computations. It implements the methods described in the following publications:

- **The TomoATT software package**
  - Chen, J., Nagaso, M., Xu, M., & Tong, P. (2025). TomoATT: An open-source package for Eikonal equation-based adjoint-state traveltime tomography for seismic velocity and azimuthal anisotropy. Computers & Geosciences, 105995. [DOI](https://doi.org/10.1016/j.cageo.2025.105995).

- **Regional tomography in Cartesian coordinates**
  - Tong, P. (2021). Adjoint-state traveltime tomography: Eikonal equation-based methods and application to the Anza area in southern California. Journal of Geophysical Research: Solid Earth, 126(5), e2021JB021818. [DOI](https://doi.org/10.1029/2021JB021818).
  - Tong, P. (2021). Adjoint-state traveltime tomography for azimuthally anisotropic media and insight into the crustal structure of central California near Parkfield. Journal of Geophysical Research: Solid Earth, 126(10), e2021JB022365. [DOI](https://doi.org/10.1029/2021JB022365).

- **Regional tomography in Spherical coordinates**
  - Chen, J., Chen, G., Nagaso, M., & Tong, P. (2023). Adjoint-state traveltime tomography for azimuthally anisotropic media in spherical coordinates. Geophysical Journal International, 234(1), 712-736. [DOI](https://doi.org/10.1093/gji/ggad093).

- **Teleseismic tomography in Spherical coordinates**
  - Chen, J., Wu, S., Xu, M., Nagaso, M., Yao, J., Wang, K., ... & Tong, P. (2023). Adjoint-state teleseismic traveltime tomography: Method and application to Thailand in Indochina Peninsula. Journal of Geophysical Research: Solid Earth, 128(12), e2023JB027348. [DOI](https://doi.org/10.1029/2023JB027348).


Thanks to the efficiency of the eikonal equation solver, traveltime computation is very fast and requires comparatively few computational resources.
Because the input data for TomoATT are traveltimes recorded at seismic stations, large volumes of input data can be prepared easily.

This library is developed for modeling very large domains. For this purpose, three layers of parallelization are applied:
- layer 1: simultaneous-run parallelization (traveltimes for multiple seismic sources may be calculated simultaneously)
- layer 2: subdomain decomposition (if the computational domain requires too much memory for a single node, it can be split into subdomains, each run on a separate compute node)
- layer 3: sweeping parallelization (within each subdomain, the sweeping levels are also parallelized)

The details of the parallelization method applied in this library are described in [Miles Detrixhe and Frédéric Gibou (2016)](https://doi.org/10.1016/j.jcp.2016.06.023).
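
As a purely illustrative example (the actual process counts are configured in the input file and depend on your model size): if 2 simultaneous source runs, 4 subdomains, and 2 sweeping processes per subdomain were requested, the three layers would multiply to 2 × 4 × 2 = 16 MPI processes in total:
``` bash
# hypothetical decomposition: 2 simultaneous runs x 4 subdomains x 2 sweep processes
mpirun -n 16 ./TOMOATT -i ./input_params.yml
```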

Regional events (sources inside the computational domain) and teleseismic events (sources outside the computational domain) may both be used for inversion.

## Quick installation
This library is available in the [conda-forge channel](https://anaconda.org/conda-forge/tomoatt). You can install it on a personal computer with the following command:
``` bash
conda install -c conda-forge tomoatt pytomoatt
```

TomoATT is also capable of running on high-performance computing (HPC) systems. Detailed installation instructions are given in the [installation manual](https://tomoatt.com/docs/GetStarted/Installation/Dependencies).

<!--

## dependency
- MPI v3.0 or higher

optional:
- HDF5 (parallel IO needs to be enabled)
- h5py (used in the pre/post-processing examples)

## to clone
``` bash
git clone --recursive https://github.com/TomoATT/TomoATT.git
```

## to compile
``` bash
mkdir build && cd build
cmake .. && make -j 8
```

compile with cuda support
``` bash
cmake .. -DUSE_CUDA=True && make -j 8
``` -->

## to run an example
``` bash
mpirun -n 4 ./TOMOATT -i ./input_params.yml
```
Please check the [user manual](https://tomoatt.com/docs) and the `examples` directory for details.


<!-- ## FAQs.
### git submodule problem
If you get the error message below:
``` text
-- /(path to your tomoatt)/TomoATT/external_libs/yaml-cpp/.git does not exist. Initializing yaml-cpp submodule ...
fatal: not a git repository (or any of the parent directories): .git
CMake Error at external_libs/CMakeLists.txt:9 (message):
  /usr/bin/git submodule update --init dependencies/yaml-cpp failed with exit
  code 128, please checkout submodules
Call Stack (most recent call first):
  external_libs/CMakeLists.txt:13 (initialize_submodule)
```

you will need to update the submodules manually with the command:
``` bash
git submodule update --init --recursive
```

If the `git submodule` command does not work in your environment, download the yaml-cpp library from [its repository](https://github.com/jbeder/yaml-cpp) and place it in the `external_libs` directory,
so that it ends up at `external_libs/yaml-cpp`. -->
README_install_HDF5.md (new file, 12 lines)
@@ -0,0 +1,12 @@
# Compile the HDF5 library with the parallel option from source

1. Run the install script `./install_mpi_and_hdf5_local.sh`, which compiles OpenMPI and HDF5 and places their executables in `./external_libs/local_mpi_hdf5/bin`.

2. Compile TOMOATT with
``` bash
mkdir build && cd build
cmake .. -DCMAKE_PREFIX_PATH=$(pwd)/../external_libs/local_mpi_hdf5
make -j16
```

This creates the TOMOATT executable at `./build/TOMOATT`.
|
||||
35
cuda/cuda_constants.cuh
Normal file
35
cuda/cuda_constants.cuh
Normal file
@@ -0,0 +1,35 @@
|
||||
#ifndef CUDA_CONSTANTS_H
#define CUDA_CONSTANTS_H

#include <cuda_runtime.h>
#include <cuda.h>

#define CUSTOMREAL double // need here for cuda kernels
#define MPI_CR MPI_DOUBLE
//#define CUSTOMREAL float // need here for cuda kernels
//#define MPI_CR MPI_FLOAT

#define MPI_DUMMY_TAG_CUDA 9999

// maximum grid dimension in one direction of GPU
//#define MAXIMUM_GRID_DIM 65535

#define CUDA_MAX_BLOCK_SIZE 1024
#define CUDA_MAX_GRID_SIZE 65535
#define CUDA_MAX_THREADS_PER_BLOCK 1024

//#define CUDA_SWEEPING_BLOCK_SIZE 16  // s
//#define CUDA_SWEEPING_BLOCK_SIZE 32  // 15.254 s
//#define CUDA_SWEEPING_BLOCK_SIZE 64  // 15.281 s
//#define CUDA_SWEEPING_BLOCK_SIZE 128 // 15.378 s
//#define CUDA_SWEEPING_BLOCK_SIZE 256 // s
#define CUDA_SWEEPING_BLOCK_SIZE 512 //
//#define CUDA_SWEEPING_BLOCK_SIZE 1024 //


#define CUDA_L1_BLOCK_SIZE 128
//#define CUDA_L1_BLOCK_SIZE 256

#define CUDA_MAX_NUM_STREAMS 32

#endif // CUDA_CONSTANTS_H
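The CUSTOMREAL / MPI_CR pair above fixes the precision of device data and of the matching MPI transfers, so the two macros must always be switched together. A small hypothetical helper (not part of TomoATT) that catches a half-edited switch at runtime could look like this:

``` cpp
// Hypothetical consistency check: the byte size of MPI_CR must equal sizeof(CUSTOMREAL).
// Call after MPI_Init().
#include <cstdio>
#include <mpi.h>
#include "cuda_constants.cuh"

inline void check_customreal_vs_mpi_cr() {
    int mpi_bytes = 0;
    MPI_Type_size(MPI_CR, &mpi_bytes);
    if (mpi_bytes != (int) sizeof(CUSTOMREAL)) {
        std::fprintf(stderr, "CUSTOMREAL is %zu bytes but MPI_CR is %d bytes; fix cuda_constants.cuh\n",
                     sizeof(CUSTOMREAL), mpi_bytes);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
}
```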
|
||||
145
cuda/cuda_initialize.cuh
Normal file
145
cuda/cuda_initialize.cuh
Normal file
@@ -0,0 +1,145 @@
|
||||
#ifndef CUDA_INITIALIZE_H
#define CUDA_INITIALIZE_H


#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cstdio>   // printf, fprintf, sprintf
#include <cstdlib>  // exit
#include <iostream> // std::cout

//#include "config.h"
#include "cuda_constants.cuh"

void get_free_memory(double* free_db, double* used_db, double* total_db) {

    // gets memory usage in byte
    size_t free_byte ;
    size_t total_byte ;
    cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;
    if ( cudaSuccess != cuda_status ){
        printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
        exit(EXIT_FAILURE);
    }

    *free_db = (double)free_byte ;
    *total_db = (double)total_byte ;
    *used_db = *total_db - *free_db ;
    return;
}


// setup cuda constants and variables by reading device properties
void initialize_cuda(){

    std::cout << "Initializing CUDA..." << std::endl;

    int ncuda_device;
    int device;

    // count number of devices
    cudaGetDeviceCount(&ncuda_device);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("cudaGetDeviceCount returned error code %d after %d devices\n", err, ncuda_device);
        exit(1);
    }

    if (ncuda_device == 0)
    {
        printf("There is no device supporting CUDA\n");
        exit(1);
    }

    // set the active device
    if (ncuda_device >= 1){
        cudaDeviceReset();

        device = world_rank % ncuda_device;
        cudaSetDevice(device);

        cudaFree(0);

        // check device is set
        cudaGetDevice(&device);
        if (device != world_rank % ncuda_device){
            printf("Error: Could not set device to %d\n", world_rank % ncuda_device);
            exit(1);
        }
    } // end if ncuda_device >= 1

    cudaGetDevice(&device);

    // get device properties
    cudaDeviceProp deviceProp; // in cuda_constants
    cudaGetDeviceProperties(&deviceProp, device);

    // exit if machine has no CUDA-enabled device
    if (deviceProp.major == 9999 && deviceProp.minor == 9999){
        printf("Error: No CUDA device found\n");
        exit(1);
    }

    // print device properties
    char filename[256];

    if (world_rank == 0){
        sprintf(filename, "cuda_device_info.txt");
        FILE *fp = fopen(filename, "w");

        if(fp == NULL){
            printf("Error: Could not open file %s\n", filename);
            exit(1);
        }

        // display device properties
        fprintf(fp,"Device Name = %s\n",deviceProp.name);
        fprintf(fp,"memory:\n");
        fprintf(fp,"  totalGlobalMem (in MB): %f\n",(unsigned long) deviceProp.totalGlobalMem / (1024.f * 1024.f));
        fprintf(fp,"  totalGlobalMem (in GB): %f\n",(unsigned long) deviceProp.totalGlobalMem / (1024.f * 1024.f * 1024.f));
        fprintf(fp,"  totalConstMem (in bytes): %lu\n",(unsigned long) deviceProp.totalConstMem);
        fprintf(fp,"  Maximum 1D texture size (in texels): %lu\n",(unsigned long) deviceProp.maxTexture1D);
        fprintf(fp,"  sharedMemPerBlock (in bytes): %lu\n",(unsigned long) deviceProp.sharedMemPerBlock);
        fprintf(fp,"  regsPerBlock: %lu\n",(unsigned long) deviceProp.regsPerBlock);
        fprintf(fp,"blocks:\n");
        fprintf(fp,"  Maximum number of threads per block: %d\n",deviceProp.maxThreadsPerBlock);
        fprintf(fp,"  Maximum size of each dimension of a block: %d x %d x %d\n",
                deviceProp.maxThreadsDim[0],deviceProp.maxThreadsDim[1],deviceProp.maxThreadsDim[2]);
        fprintf(fp,"  Maximum sizes of each dimension of a grid: %d x %d x %d\n",
                deviceProp.maxGridSize[0],deviceProp.maxGridSize[1],deviceProp.maxGridSize[2]);
        fprintf(fp,"features:\n");
        fprintf(fp,"  Compute capability of the device = %d.%d\n", deviceProp.major, deviceProp.minor);
        fprintf(fp,"  multiProcessorCount: %d\n",deviceProp.multiProcessorCount);
        if (deviceProp.canMapHostMemory){
            fprintf(fp,"  canMapHostMemory: TRUE\n");
        }else{
            fprintf(fp,"  canMapHostMemory: FALSE\n");
        }
        if (deviceProp.deviceOverlap){
            fprintf(fp,"  deviceOverlap: TRUE\n");
        }else{
            fprintf(fp,"  deviceOverlap: FALSE\n");
        }
        if (deviceProp.concurrentKernels){
            fprintf(fp,"  concurrentKernels: TRUE\n");
        }else{
            fprintf(fp,"  concurrentKernels: FALSE\n");
        }
        // outputs initial memory infos via cudaMemGetInfo()
        double free_db,used_db,total_db;
        get_free_memory(&free_db,&used_db,&total_db);
        fprintf(fp,"memory usage:\n");
        fprintf(fp,"  rank %d: GPU memory usage: used = %f MB, free = %f MB, total = %f MB\n",myrank,
                used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);

        // closes output file
        fclose(fp);
    }

}


void finalize_cuda(){
    cudaDeviceReset();
}

#endif // CUDA_INITIALIZE_H
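For context, a minimal driver showing where `initialize_cuda()` and `finalize_cuda()` sit relative to MPI start-up. This `main()` is only an illustration; in TomoATT the globals `world_rank` and `myrank` are expected to come from `config.h`, which is commented out above.

``` cpp
// Illustrative driver only, not TomoATT's actual entry point.
#include <mpi.h>

// assumed globals, normally provided by config.h
int world_rank = 0;
int myrank     = 0;

#include "cuda_initialize.cuh"

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    myrank = world_rank;

    initialize_cuda();   // picks device = world_rank % (number of devices), writes cuda_device_info.txt on rank 0

    // ... eikonal solver / inversion would run here ...

    finalize_cuda();     // cudaDeviceReset() before MPI shutdown
    MPI_Finalize();
    return 0;
}
```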
|
||||
186
cuda/cuda_utils.cu
Normal file
186
cuda/cuda_utils.cu
Normal file
@@ -0,0 +1,186 @@
|
||||
#include "cuda_utils.cuh"
|
||||
|
||||
|
||||
|
||||
// allocate memory on device
|
||||
cudaError_t allocate_memory_on_device_i(void** d_ptr, size_t size)
|
||||
{
|
||||
return cudaMalloc((void**) d_ptr, size * sizeof(int));
|
||||
}
|
||||
|
||||
cudaError_t allocate_memory_on_device_cv(void** d_ptr, size_t size)
|
||||
{
|
||||
return cudaMalloc((void**) d_ptr, size * sizeof(CUSTOMREAL));
|
||||
}
|
||||
|
||||
cudaError_t allocate_memory_on_device_bl(void** d_ptr, size_t size)
|
||||
{
|
||||
return cudaMalloc((void**) d_ptr, size * sizeof(bool));
|
||||
}
|
||||
|
||||
|
||||
// device-host shared memory (pinned memory) (maybe unnecessary for CUDA-aware MPI)
|
||||
cudaError_t allocate_memory_on_device_cv_pinned(void** d_ptr, size_t size)
|
||||
{
|
||||
return cudaMallocHost((void**) d_ptr, size * sizeof(CUSTOMREAL));
|
||||
}
|
||||
|
||||
|
||||
// deallocate memory on device
|
||||
cudaError_t deallocate_memory_on_device_i(int*& d_ptr)
|
||||
{
|
||||
return cudaFree(d_ptr);
|
||||
}
|
||||
|
||||
cudaError_t deallocate_memory_on_device_cv(CUSTOMREAL*& d_ptr)
|
||||
{
|
||||
return cudaFree(d_ptr);
|
||||
}
|
||||
|
||||
cudaError_t deallocate_memory_on_device_bl(bool*& d_ptr)
|
||||
{
|
||||
return cudaFree(d_ptr);
|
||||
}
|
||||
|
||||
|
||||
// copy memory between host and device
|
||||
cudaError_t copy_host_to_device_i(int* d_ptr, int* h_ptr, const size_t size)
|
||||
{
|
||||
return cudaMemcpy(d_ptr, h_ptr, size * sizeof(int), cudaMemcpyHostToDevice);
|
||||
}
|
||||
|
||||
cudaError_t copy_host_to_device_cv(CUSTOMREAL* d_ptr, CUSTOMREAL* h_ptr, const size_t size)
|
||||
{
|
||||
return cudaMemcpy(d_ptr, h_ptr, size * sizeof(CUSTOMREAL), cudaMemcpyHostToDevice);
|
||||
}
|
||||
|
||||
cudaError_t copy_host_to_device_bl(bool* d_ptr, bool* h_ptr, const size_t size)
|
||||
{
|
||||
return cudaMemcpy(d_ptr, h_ptr, size * sizeof(bool), cudaMemcpyHostToDevice);
|
||||
}
|
||||
|
||||
// copy memory from device to host
|
||||
cudaError_t copy_device_to_host_i(int* h_ptr, int* d_ptr, size_t size)
|
||||
{
|
||||
return cudaMemcpy(h_ptr, d_ptr, size * sizeof(int), cudaMemcpyDeviceToHost);
|
||||
}
|
||||
cudaError_t copy_device_to_host_cv(CUSTOMREAL* h_ptr, CUSTOMREAL* d_ptr, size_t size)
|
||||
{
|
||||
return cudaMemcpy(h_ptr, d_ptr, size * sizeof(CUSTOMREAL), cudaMemcpyDeviceToHost);
|
||||
}
|
||||
|
||||
|
||||
// allocate and copy to device
|
||||
void* allocate_and_copy_host_to_device_i(int* h_ptr, size_t size, int num)
|
||||
{
|
||||
void* d_ptr;
|
||||
|
||||
print_CUDA_error_if_any(allocate_memory_on_device_i(&d_ptr, size), num);
|
||||
print_CUDA_error_if_any(copy_host_to_device_i((int*)d_ptr, h_ptr, size),num);
|
||||
|
||||
return d_ptr;
|
||||
}
|
||||
|
||||
void* allocate_and_copy_host_to_device_cv(CUSTOMREAL* h_ptr, size_t size, int num)
|
||||
{
|
||||
void* d_ptr;
|
||||
print_CUDA_error_if_any(allocate_memory_on_device_cv(&d_ptr, size),num);
|
||||
print_CUDA_error_if_any(copy_host_to_device_cv((CUSTOMREAL*)d_ptr, h_ptr, size), num);
|
||||
|
||||
return d_ptr;
|
||||
}
|
||||
|
||||
void* allocate_and_copy_host_to_device_bl(bool* h_ptr, size_t size, int num)
|
||||
{
|
||||
void* d_ptr;
|
||||
print_CUDA_error_if_any(allocate_memory_on_device_bl(&d_ptr, size),num);
|
||||
print_CUDA_error_if_any(copy_host_to_device_bl((bool*)d_ptr, h_ptr, size), num);
|
||||
|
||||
return d_ptr;
|
||||
}
|
||||
|
||||
// allocate, flatten and copy from host to device
|
||||
void flatten_arr_i(int* h_ptr_flattened, std::vector<int*>&h_v, int size_total, int* size_each)
|
||||
{
|
||||
// flatten
|
||||
int counter = 0;
|
||||
int n_v = h_v.size();
|
||||
|
||||
for (int i = 0; i < n_v; i++) { // levels
|
||||
for (int j = 0; j < size_each[i]; j++) {
|
||||
h_ptr_flattened[counter] = h_v.at(i)[j];
|
||||
counter++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void flatten_arr_cv(CUSTOMREAL* h_ptr_flattened, std::vector<CUSTOMREAL*> &h_v, int size_total, int* size_each)
|
||||
{
|
||||
// flatten
|
||||
int counter = 0;
|
||||
int n_v = h_v.size();
|
||||
|
||||
for (int i = 0; i < n_v; i++) { // levels
|
||||
for (int j = 0; j < size_each[i]; j++) {
|
||||
h_ptr_flattened[counter] = h_v.at(i)[j];
|
||||
counter++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void flatten_arr_bl(bool* h_ptr_flattened, std::vector<bool*> &h_v, int size_total, int* size_each)
|
||||
{
|
||||
// flatten
|
||||
int counter = 0;
|
||||
int n_v = h_v.size();
|
||||
|
||||
for (int i = 0; i < n_v; i++) { // levels
|
||||
for (int j = 0; j < size_each[i]; j++) {
|
||||
h_ptr_flattened[counter] = h_v.at(i)[j];
|
||||
counter++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void* allocate_and_copy_host_to_device_flattened_i(std::vector<int*>& vh, int size_total, int* size_each, int num){
|
||||
// flatten
|
||||
int* h_ptr_flattened = new int[size_total];
|
||||
flatten_arr_i(h_ptr_flattened, vh, size_total, size_each);
|
||||
|
||||
// allocate and copy
|
||||
void* d_ptr = allocate_and_copy_host_to_device_i(h_ptr_flattened, size_total, num);
|
||||
|
||||
// free
|
||||
delete[] h_ptr_flattened;
|
||||
|
||||
return d_ptr;
|
||||
}
|
||||
|
||||
void* allocate_and_copy_host_to_device_flattened_cv(std::vector<CUSTOMREAL*>& vh, int size_total, int* size_each, int num){
|
||||
// flatten
|
||||
CUSTOMREAL* h_ptr_flattened = new CUSTOMREAL[size_total];
|
||||
flatten_arr_cv(h_ptr_flattened, vh, size_total, size_each);
|
||||
|
||||
// allocate and copy
|
||||
void* d_ptr = allocate_and_copy_host_to_device_cv(h_ptr_flattened, size_total, num);
|
||||
|
||||
// free
|
||||
delete[] h_ptr_flattened;
|
||||
|
||||
return d_ptr;
|
||||
}
|
||||
|
||||
void* allocate_and_copy_host_to_device_flattened_bl(std::vector<bool*>& vh, int size_total, int* size_each, int num){
|
||||
// flatten
|
||||
bool* h_ptr_flattened = new bool[size_total];
|
||||
flatten_arr_bl(h_ptr_flattened, vh, size_total, size_each);
|
||||
|
||||
// allocate and copy
|
||||
void* d_ptr = allocate_and_copy_host_to_device_bl(h_ptr_flattened, size_total, num);
|
||||
|
||||
// free
|
||||
delete[] h_ptr_flattened;
|
||||
|
||||
return d_ptr;
|
||||
}
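As a usage note, here is a minimal sketch (the level arrays and sizes below are made up for illustration) of how the flattened-copy helpers above are intended to be called for per-level data:

``` cpp
// Illustrative only: upload two "levels" of CUSTOMREAL data to the device as one flat array.
#include <vector>
#include "cuda_utils.cuh"

void example_upload() {
    CUSTOMREAL lev0[3] = {1.0, 2.0, 3.0};
    CUSTOMREAL lev1[2] = {4.0, 5.0};

    std::vector<CUSTOMREAL*> levels = {lev0, lev1};
    int size_each[2] = {3, 2};   // number of nodes on each level
    int size_total   = 5;        // sum of size_each

    // allocates device memory, flattens level by level on the host, and copies in one transfer
    CUSTOMREAL* d_flat = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv(
                             levels, size_total, size_each, /* error id */ 100);

    // ... kernels index d_flat using per-level offsets accumulated from size_each ...

    print_CUDA_error_if_any(deallocate_memory_on_device_cv(d_flat), 101);
}
```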
|
||||
|
||||
211
cuda/cuda_utils.cuh
Normal file
211
cuda/cuda_utils.cuh
Normal file
@@ -0,0 +1,211 @@
|
||||
#ifndef CUDA_UTILS_H
#define CUDA_UTILS_H

#include <mpi.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <cmath>   // ceil (used in get_block_xy)
#include <cstdlib> // exit
#include <vector>

#include "cuda_constants.cuh"

// function to convert kernel,grid to ijk
#define I2V_cuda(i,j,k,II,JJ) ((k)*(JJ)*(II)+(j)*(II)+(i))


// allocate memory on device
cudaError_t allocate_memory_on_device_i(void** d_ptr, size_t size);
cudaError_t allocate_memory_on_device_cv(void** d_ptr, size_t size);
cudaError_t allocate_memory_on_device_bl(void** d_ptr, size_t size);
cudaError_t allocate_memory_on_device_cv_pinned(void** d_ptr, size_t size);

// deallocate memory on device
cudaError_t deallocate_memory_on_device_i(int *&d_ptr);
cudaError_t deallocate_memory_on_device_cv(CUSTOMREAL *&d_ptr);
cudaError_t deallocate_memory_on_device_bl(bool *&d_ptr);

// copy memory from host to device
cudaError_t copy_host_to_device_i(int *d_ptr, int *h_ptr, const size_t size);
cudaError_t copy_host_to_device_cv(CUSTOMREAL *d_ptr, CUSTOMREAL *h_ptr, const size_t size);
cudaError_t copy_host_to_device_bl(bool *d_ptr, bool *h_ptr, const size_t size);

// copy memory from device to host
cudaError_t copy_device_to_host_i(int *h_ptr, int *d_ptr, size_t size);
cudaError_t copy_device_to_host_cv(CUSTOMREAL *h_ptr, CUSTOMREAL *d_ptr, size_t size);

// allocate and copy to device
void* allocate_and_copy_host_to_device_i(int* h_ptr, size_t size, int num);
void* allocate_and_copy_host_to_device_cv(CUSTOMREAL* h_ptr, size_t size, int num);
void* allocate_and_copy_host_to_device_bl(bool* h_ptr, size_t size, int num);


// allocate, flatten and copy to device
void flatten_arr_i(int* h_ptr_flattened, std::vector<int*> &h_v, int size_total, int* size_each);
void flatten_arr_cv(CUSTOMREAL* h_ptr_flattened, std::vector<CUSTOMREAL*> &h_v, int size_total, int* size_each);
void flatten_arr_bl(bool* h_ptr_flattened, std::vector<bool*> &h_v, int size_total, int* size_each);


void* allocate_and_copy_host_to_device_flattened_i(std::vector<int*>& vh, int size_total, int* size_each, int num);
void* allocate_and_copy_host_to_device_flattened_cv(std::vector<CUSTOMREAL*>& vh, int size_total, int* size_each, int num);
void* allocate_and_copy_host_to_device_flattened_bl(std::vector<bool*>& vh, int size_total, int* size_each, int num);


// mpi send recv
static inline void cuda_send_cr(CUSTOMREAL* buf, int count, int dest, MPI_Comm inter_sub_comm){
    MPI_Send(buf, count, MPI_CR, dest, MPI_DUMMY_TAG_CUDA, inter_sub_comm);
}

static inline void cuda_recv_cr(CUSTOMREAL* buf, int count, int source, MPI_Comm inter_sub_comm){
    MPI_Status stat;
    MPI_Recv(buf, count, MPI_CR, source, MPI_DUMMY_TAG_CUDA, inter_sub_comm, &stat);
}

static inline void cuda_synchronize_all_sub(MPI_Comm& sub_comm){
    MPI_Barrier(sub_comm);
}

inline void cuda_wait_req(MPI_Request& req){
    MPI_Status status;
    MPI_Wait(&req, &status);
}


static inline void get_block_xy(int num_blocks, int* num_blocks_x, int* num_blocks_y) {
    // at first, num_blocks_x is set to num_blocks and num_blocks_y to 1;
    // while num_blocks_x exceeds the grid size limit of 65535, num_blocks_x is halved and num_blocks_y is doubled
    *num_blocks_x = num_blocks;
    *num_blocks_y = 1;

    while (*num_blocks_x > CUDA_MAX_GRID_SIZE) {
        *num_blocks_x = (int) ceil(*num_blocks_x * 0.5f);
        *num_blocks_y = *num_blocks_y * 2;
    }

}


static inline void get_thread_block_for_3d_loop(int nx, int ny, int nz, dim3* threads, dim3* blocks) {
    threads->x = 8; threads->y = 8; threads->z = 8; // use 512 threads in total
    blocks->x = (nx + threads->x - 1)/threads->x;
    blocks->y = (ny + threads->y - 1)/threads->y;
    blocks->z = (nz + threads->z - 1)/threads->z;
}


static inline void get_thread_block_for_ibound(int nx, int ny, int nz, dim3* threads, dim3* blocks) {
    threads->x = nx; threads->y = 8; threads->z = 8;
    blocks->x = (nx + threads->x - 1)/threads->x;
    blocks->y = (ny + threads->y - 1)/threads->y;
    blocks->z = (nz + threads->z - 1)/threads->z;
}


static inline void get_thread_block_for_jbound(int nx, int ny, int nz, dim3* threads, dim3* blocks) {
    threads->x = 8; threads->y = ny; threads->z = 8;
    blocks->x = (nx + threads->x - 1)/threads->x;
    blocks->y = (ny + threads->y - 1)/threads->y;
    blocks->z = (nz + threads->z - 1)/threads->z;
}


static inline void get_thread_block_for_kbound(int nx, int ny, int nz, dim3* threads, dim3* blocks) {
    threads->x = 8; threads->y = 8; threads->z = nz;
    blocks->x = (nx + threads->x - 1)/threads->x;
    blocks->y = (ny + threads->y - 1)/threads->y;
    blocks->z = (nz + threads->z - 1)/threads->z;
}


inline void cuda_isend_cr(CUSTOMREAL* buf, int count, int dest, MPI_Comm& comm, MPI_Request& request){
    //MPI_Request request = MPI_REQUEST_NULL;
    //std::cout << "sending from : " << inter_sub_rank << ", to : " << dest <<", size : " << count << std::endl;
    int DUMMY_TAG = 9999;
    MPI_Isend(buf, count, MPI_CR, dest, DUMMY_TAG, comm, &request);
}

inline void cuda_irecv_cr(CUSTOMREAL* buf, int count, int source, MPI_Comm& comm, MPI_Request& request){
    //MPI_Request request = MPI_REQUEST_NULL;
    //std::cout << "receiving by : " << inter_sub_rank << ", from : " << source << ", size : " << count << std::endl;
    int DUMMY_TAG = 9999;
    MPI_Irecv(buf, count, MPI_CR, source, DUMMY_TAG, comm, &request);
}


#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

inline void print_memory_usage(){
    size_t free_byte ;
    size_t total_byte ;
    cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;
    if ( cudaSuccess != cuda_status ){
        printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
        exit(1);
    }

    double free_db = (double)free_byte ;
    double total_db = (double)total_byte ;
    double used_db = total_db - free_db ;

    printf("GPU memory usage: used = %f MB, free = %f MB, total = %f MB\n",
           used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
}

inline void print_CUDA_error_if_any(cudaError_t err, int num) {
    if (cudaSuccess != err)
    {
        printf("\nCUDA error !!!!! <%s> !!!!! \nat CUDA call error code: # %d\n",cudaGetErrorString(err),num);
        fflush(stdout);

        // outputs error file
        FILE* fp;
        int myrank;
        char filename[BUFSIZ];
        MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
        sprintf(filename,"error_message_%06d.txt",myrank);
        fp = fopen(filename,"a+");
        if (fp != NULL) {
            fprintf(fp,"\nCUDA error !!!!! <%s> !!!!! \nat CUDA call error code: # %d\n",cudaGetErrorString(err),num);
            fclose(fp);
        }

        // check memory usage
        size_t free_byte ;
        size_t total_byte ;
        cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;

        if ( cudaSuccess != cuda_status ){
            printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
            fflush(stdout);
            exit(1);
        }

        // print usage
        double free_db = (double)free_byte ;
        double total_db = (double)total_byte ;
        double used_db = total_db - free_db ;
        printf("GPU memory usage: used = %f MB, free = %f MB, total = %f MB\n", used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);


        // stops program
        //MPI_Abort(MPI_COMM_WORLD,1);
        MPI_Finalize();
        exit(EXIT_FAILURE);
    }
}


#endif // CUDA_UTILS_H
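A short sketch of how the launch helpers and `gpuErrchk` above are meant to be combined; `scale_kernel` and `scale_field_on_device` are hypothetical examples, not TomoATT kernels:

``` cpp
// Illustrative only: launch a trivial kernel over an ni x nj x nk field of CUSTOMREAL values.
#include "cuda_utils.cuh"

__global__ void scale_kernel(CUSTOMREAL* field, CUSTOMREAL factor, int ni, int nj, int nk) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.z * blockDim.z + threadIdx.z;
    if (i < ni && j < nj && k < nk)
        field[I2V_cuda(i, j, k, ni, nj)] *= factor;   // same flattening convention as the host side
}

void scale_field_on_device(CUSTOMREAL* d_field, CUSTOMREAL factor, int ni, int nj, int nk) {
    dim3 threads, blocks;
    get_thread_block_for_3d_loop(ni, nj, nk, &threads, &blocks);  // 8x8x8 threads, grid covers the volume
    scale_kernel<<<blocks, threads>>>(d_field, factor, ni, nj, nk);
    gpuErrchk(cudaGetLastError());        // catches launch-configuration errors
    gpuErrchk(cudaDeviceSynchronize());   // catches errors raised during execution
}
```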
|
||||
681
cuda/grid_wrapper.cu
Normal file
681
cuda/grid_wrapper.cu
Normal file
@@ -0,0 +1,681 @@
|
||||
#include "grid_wrapper.cuh"
|
||||
|
||||
void cuda_initialize_grid_1st(std::vector< std::vector<int> >& ijk, Grid_on_device* grid_dv, int const& loc_I, int const& loc_J, int const& loc_K,
|
||||
CUSTOMREAL const& dp, CUSTOMREAL const& dt, CUSTOMREAL const& dr, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_ip1j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_im1j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jp1k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jm1k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__kp1, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__km1, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_a, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_b, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_c, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_f, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0v, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0r, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0t, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0p, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fun, \
|
||||
std::vector<std::vector<bool*>> & vv_change){
|
||||
|
||||
// store grid parameters
|
||||
grid_dv->loc_I_host = loc_I;
|
||||
grid_dv->loc_J_host = loc_J;
|
||||
grid_dv->loc_K_host = loc_K;
|
||||
grid_dv->dr_host = dr;
|
||||
grid_dv->dt_host = dt;
|
||||
grid_dv->dp_host = dp;
|
||||
|
||||
// count node number
|
||||
grid_dv->n_nodes_total_host = 0;
|
||||
grid_dv->n_levels_host = ijk.size();
|
||||
// allocate grid_dv->n_nodes_on_levels_host
|
||||
grid_dv->n_nodes_on_levels_host = new int[grid_dv->n_levels_host];
|
||||
|
||||
for (int i=0; i<grid_dv->n_levels_host; i++){
|
||||
grid_dv->n_nodes_on_levels_host[i] = ijk[i].size();
|
||||
grid_dv->n_nodes_total_host += grid_dv->n_nodes_on_levels_host[i];
|
||||
// find max
|
||||
if (grid_dv->n_nodes_on_levels_host[i] > grid_dv->n_nodes_max_host){
|
||||
grid_dv->n_nodes_max_host = grid_dv->n_nodes_on_levels_host[i];
|
||||
}
|
||||
}
|
||||
|
||||
// allocate memory on device
|
||||
grid_dv->n_nodes_on_levels = (int*) allocate_and_copy_host_to_device_i(grid_dv->n_nodes_on_levels_host, grid_dv->n_levels_host, 0);
|
||||
|
||||
grid_dv->vv_i__j__k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 2);
|
||||
grid_dv->vv_ip1j__k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 2);
|
||||
grid_dv->vv_im1j__k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 3);
|
||||
grid_dv->vv_i__jp1k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 4);
|
||||
grid_dv->vv_i__jm1k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 5);
|
||||
grid_dv->vv_i__j__kp1_0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 6);
|
||||
grid_dv->vv_i__j__km1_0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 7);
|
||||
|
||||
grid_dv->vv_i__j__k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_ip1j__k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_im1j__k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jp1k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__jm1k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__kp1_1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
grid_dv->vv_i__j__km1_1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 14);
|
||||
|
||||
grid_dv->vv_i__j__k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 15);
|
||||
grid_dv->vv_ip1j__k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 16);
|
||||
grid_dv->vv_im1j__k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 17);
|
||||
grid_dv->vv_i__jp1k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 18);
|
||||
grid_dv->vv_i__jm1k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 19);
|
||||
grid_dv->vv_i__j__kp1_2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 20);
|
||||
grid_dv->vv_i__j__km1_2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 21);
|
||||
|
||||
grid_dv->vv_i__j__k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 22);
|
||||
grid_dv->vv_ip1j__k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 23);
|
||||
grid_dv->vv_im1j__k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 24);
|
||||
grid_dv->vv_i__jp1k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 25);
|
||||
grid_dv->vv_i__jm1k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 26);
|
||||
grid_dv->vv_i__j__kp1_3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 27);
|
||||
grid_dv->vv_i__j__km1_3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 28);
|
||||
|
||||
grid_dv->vv_i__j__k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 29);
|
||||
grid_dv->vv_ip1j__k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 30);
|
||||
grid_dv->vv_im1j__k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 31);
|
||||
grid_dv->vv_i__jp1k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 32);
|
||||
grid_dv->vv_i__jm1k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 33);
|
||||
grid_dv->vv_i__j__kp1_4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 34);
|
||||
grid_dv->vv_i__j__km1_4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 35);
|
||||
|
||||
grid_dv->vv_i__j__k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 36);
|
||||
grid_dv->vv_ip1j__k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 37);
|
||||
grid_dv->vv_im1j__k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 38);
|
||||
grid_dv->vv_i__jp1k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 39);
|
||||
grid_dv->vv_i__jm1k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 40);
|
||||
grid_dv->vv_i__j__kp1_5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 41);
|
||||
grid_dv->vv_i__j__km1_5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 42);
|
||||
|
||||
grid_dv->vv_i__j__k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 43);
|
||||
grid_dv->vv_ip1j__k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 44);
|
||||
grid_dv->vv_im1j__k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 45);
|
||||
grid_dv->vv_i__jp1k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 46);
|
||||
grid_dv->vv_i__jm1k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 47);
|
||||
grid_dv->vv_i__j__kp1_6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 48);
|
||||
grid_dv->vv_i__j__km1_6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 49);
|
||||
|
||||
grid_dv->vv_i__j__k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 50);
|
||||
grid_dv->vv_ip1j__k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 51);
|
||||
grid_dv->vv_im1j__k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 52);
|
||||
grid_dv->vv_i__jp1k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 53);
|
||||
grid_dv->vv_i__jm1k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 54);
|
||||
grid_dv->vv_i__j__kp1_7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 55);
|
||||
grid_dv->vv_i__j__km1_7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 56);
|
||||
|
||||
grid_dv->vv_fac_a_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 57);
|
||||
grid_dv->vv_fac_b_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 58);
|
||||
grid_dv->vv_fac_c_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 59);
|
||||
grid_dv->vv_fac_f_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 60);
|
||||
grid_dv->vv_T0v_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 61);
|
||||
grid_dv->vv_T0r_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 62);
|
||||
grid_dv->vv_T0t_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 63);
|
||||
grid_dv->vv_T0p_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 64);
|
||||
grid_dv->vv_fun_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 65);
|
||||
grid_dv->vv_change_0 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 66);
|
||||
|
||||
grid_dv->vv_fac_a_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 67);
|
||||
grid_dv->vv_fac_b_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 68);
|
||||
grid_dv->vv_fac_c_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 69);
|
||||
grid_dv->vv_fac_f_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 70);
|
||||
grid_dv->vv_T0v_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 71);
|
||||
grid_dv->vv_T0r_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 72);
|
||||
grid_dv->vv_T0t_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 73);
|
||||
grid_dv->vv_T0p_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 74);
|
||||
grid_dv->vv_fun_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 75);
|
||||
grid_dv->vv_change_1 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 76);
|
||||
|
||||
grid_dv->vv_fac_a_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_2 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_3 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_4 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_5 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_6 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_7 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
// allocate tau (need full grid including boundary nodes)
|
||||
print_CUDA_error_if_any(allocate_memory_on_device_cv((void**)&(grid_dv->tau), loc_I*loc_J*loc_K), 87);
|
||||
|
||||
|
||||
}
|
||||
|
||||
void cuda_initialize_grid_3rd(std::vector< std::vector<int> >& ijk, Grid_on_device* grid_dv, int const& loc_I, int const& loc_J, int const& loc_K,
|
||||
CUSTOMREAL const& dp, CUSTOMREAL const& dt, CUSTOMREAL const& dr, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_ip1j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_im1j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jp1k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jm1k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__kp1, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__km1, \
|
||||
std::vector<std::vector<int*>> & vv_ip2j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_im2j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jp2k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jm2k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__kp2, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__km2, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_a, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_b, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_c, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_f, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0v, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0r, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0t, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0p, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fun, \
|
||||
std::vector<std::vector<bool*>> & vv_change){
|
||||
|
||||
grid_dv->if_3rd_order = true;
|
||||
|
||||
// store grid parameters
|
||||
grid_dv->loc_I_host = loc_I;
|
||||
grid_dv->loc_J_host = loc_J;
|
||||
grid_dv->loc_K_host = loc_K;
|
||||
grid_dv->dr_host = dr;
|
||||
grid_dv->dt_host = dt;
|
||||
grid_dv->dp_host = dp;
|
||||
|
||||
// count node number
|
||||
grid_dv->n_nodes_total_host = 0;
|
||||
grid_dv->n_levels_host = ijk.size();
|
||||
grid_dv->n_nodes_on_levels_host = new int[grid_dv->n_levels_host];
|
||||
|
||||
for (int i = 0; i < grid_dv->n_levels_host; i++){
|
||||
grid_dv->n_nodes_on_levels_host[i] = ijk.at(i).size();
|
||||
grid_dv->n_nodes_total_host += grid_dv->n_nodes_on_levels_host[i];
|
||||
// find max
|
||||
if (grid_dv->n_nodes_on_levels_host[i] > grid_dv->n_nodes_max_host){
|
||||
grid_dv->n_nodes_max_host = grid_dv->n_nodes_on_levels_host[i];
|
||||
}
|
||||
}
|
||||
|
||||
// allocate memory on device
|
||||
grid_dv->n_nodes_on_levels = (int*) allocate_and_copy_host_to_device_i(grid_dv->n_nodes_on_levels_host, grid_dv->n_levels_host, 0);
|
||||
|
||||
grid_dv->vv_i__j__k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 1);
|
||||
grid_dv->vv_ip1j__k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 2);
|
||||
grid_dv->vv_im1j__k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 3);
|
||||
grid_dv->vv_i__jp1k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 4);
|
||||
grid_dv->vv_i__jm1k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 5);
|
||||
grid_dv->vv_i__j__kp1_0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 6);
|
||||
grid_dv->vv_i__j__km1_0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 7);
|
||||
grid_dv->vv_ip2j__k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip2j__k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_im2j__k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im2j__k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_i__jp2k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp2k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jm2k___0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm2k__.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__j__kp2_0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp2.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__km2_0 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km2.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
|
||||
grid_dv->vv_i__j__k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_ip1j__k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_im1j__k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jp1k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__jm1k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__kp1_1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
grid_dv->vv_i__j__km1_1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 14);
|
||||
grid_dv->vv_ip2j__k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip2j__k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_im2j__k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im2j__k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_i__jp2k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp2k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jm2k___1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm2k__.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__j__kp2_1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp2.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__km2_1 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km2.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
|
||||
grid_dv->vv_i__j__k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 15);
|
||||
grid_dv->vv_ip1j__k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 16);
|
||||
grid_dv->vv_im1j__k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 17);
|
||||
grid_dv->vv_i__jp1k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 18);
|
||||
grid_dv->vv_i__jm1k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 19);
|
||||
grid_dv->vv_i__j__kp1_2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 20);
|
||||
grid_dv->vv_i__j__km1_2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 21);
|
||||
grid_dv->vv_ip2j__k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip2j__k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_im2j__k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im2j__k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_i__jp2k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp2k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jm2k___2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm2k__.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__j__kp2_2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp2.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__km2_2 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km2.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
|
||||
grid_dv->vv_i__j__k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 22);
|
||||
grid_dv->vv_ip1j__k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 23);
|
||||
grid_dv->vv_im1j__k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 24);
|
||||
grid_dv->vv_i__jp1k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 25);
|
||||
grid_dv->vv_i__jm1k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 26);
|
||||
grid_dv->vv_i__j__kp1_3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 27);
|
||||
grid_dv->vv_i__j__km1_3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 28);
|
||||
grid_dv->vv_ip2j__k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip2j__k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_im2j__k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im2j__k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_i__jp2k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp2k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jm2k___3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm2k__.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__j__kp2_3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp2.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__km2_3 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km2.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
|
||||
grid_dv->vv_i__j__k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 29);
|
||||
grid_dv->vv_ip1j__k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 30);
|
||||
grid_dv->vv_im1j__k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 31);
|
||||
grid_dv->vv_i__jp1k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 32);
|
||||
grid_dv->vv_i__jm1k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 33);
|
||||
grid_dv->vv_i__j__kp1_4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 34);
|
||||
grid_dv->vv_i__j__km1_4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 35);
|
||||
grid_dv->vv_ip2j__k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip2j__k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_im2j__k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im2j__k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_i__jp2k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp2k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jm2k___4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm2k__.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__j__kp2_4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp2.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__km2_4 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km2.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
|
||||
grid_dv->vv_i__j__k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 36);
|
||||
grid_dv->vv_ip1j__k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 37);
|
||||
grid_dv->vv_im1j__k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 38);
|
||||
grid_dv->vv_i__jp1k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 39);
|
||||
grid_dv->vv_i__jm1k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 40);
|
||||
grid_dv->vv_i__j__kp1_5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 41);
|
||||
grid_dv->vv_i__j__km1_5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 42);
|
||||
grid_dv->vv_ip2j__k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip2j__k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_im2j__k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im2j__k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_i__jp2k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp2k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jm2k___5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm2k__.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__j__kp2_5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp2.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__km2_5 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km2.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
|
||||
grid_dv->vv_i__j__k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 43);
|
||||
grid_dv->vv_ip1j__k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 44);
|
||||
grid_dv->vv_im1j__k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 45);
|
||||
grid_dv->vv_i__jp1k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 46);
|
||||
grid_dv->vv_i__jm1k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 47);
|
||||
grid_dv->vv_i__j__kp1_6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 48);
|
||||
grid_dv->vv_i__j__km1_6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 49);
|
||||
grid_dv->vv_ip2j__k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip2j__k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_im2j__k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im2j__k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_i__jp2k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp2k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jm2k___6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm2k__.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__j__kp2_6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp2.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__km2_6 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km2.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
|
||||
grid_dv->vv_i__j__k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 50);
|
||||
grid_dv->vv_ip1j__k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip1j__k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 51);
|
||||
grid_dv->vv_im1j__k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im1j__k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 52);
|
||||
grid_dv->vv_i__jp1k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp1k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 53);
|
||||
grid_dv->vv_i__jm1k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm1k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 54);
|
||||
grid_dv->vv_i__j__kp1_7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp1.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 55);
|
||||
grid_dv->vv_i__j__km1_7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km1.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 56);
|
||||
grid_dv->vv_ip2j__k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_ip2j__k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 8);
|
||||
grid_dv->vv_im2j__k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_im2j__k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 9);
|
||||
grid_dv->vv_i__jp2k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jp2k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 10);
|
||||
grid_dv->vv_i__jm2k___7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__jm2k__.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 11);
|
||||
grid_dv->vv_i__j__kp2_7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__kp2.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 12);
|
||||
grid_dv->vv_i__j__km2_7 = (int*) allocate_and_copy_host_to_device_flattened_i(vv_i__j__km2.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 13);
|
||||
|
||||
grid_dv->vv_fac_a_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 57);
|
||||
grid_dv->vv_fac_b_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 58);
|
||||
grid_dv->vv_fac_c_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 59);
|
||||
grid_dv->vv_fac_f_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 60);
|
||||
grid_dv->vv_T0v_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 61);
|
||||
grid_dv->vv_T0r_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 62);
|
||||
grid_dv->vv_T0t_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 63);
|
||||
grid_dv->vv_T0p_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 64);
|
||||
grid_dv->vv_fun_0 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 65);
|
||||
grid_dv->vv_change_0 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(0), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 66);
|
||||
|
||||
grid_dv->vv_fac_a_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 67);
|
||||
grid_dv->vv_fac_b_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 68);
|
||||
grid_dv->vv_fac_c_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 69);
|
||||
grid_dv->vv_fac_f_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 70);
|
||||
grid_dv->vv_T0v_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 71);
|
||||
grid_dv->vv_T0r_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 72);
|
||||
grid_dv->vv_T0t_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 73);
|
||||
grid_dv->vv_T0p_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 74);
|
||||
grid_dv->vv_fun_1 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 75);
|
||||
grid_dv->vv_change_1 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(1), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 76);
|
||||
|
||||
grid_dv->vv_fac_a_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_2 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_2 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(2), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_3 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_3 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(3), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_4 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_4 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(4), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_5 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_5 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(5), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_6 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_6 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(6), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
grid_dv->vv_fac_a_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_a.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 77);
|
||||
grid_dv->vv_fac_b_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_b.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 78);
|
||||
grid_dv->vv_fac_c_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_c.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 79);
|
||||
grid_dv->vv_fac_f_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fac_f.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 80);
|
||||
grid_dv->vv_T0v_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0v.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 81);
|
||||
grid_dv->vv_T0r_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0r.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 82);
|
||||
grid_dv->vv_T0t_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0t.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 83);
|
||||
grid_dv->vv_T0p_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_T0p.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 84);
|
||||
grid_dv->vv_fun_7 = (CUSTOMREAL*) allocate_and_copy_host_to_device_flattened_cv( vv_fun.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 85);
|
||||
grid_dv->vv_change_7 = (bool*) allocate_and_copy_host_to_device_flattened_bl( vv_change.at(7), grid_dv->n_nodes_total_host, grid_dv->n_nodes_on_levels_host, 86);
|
||||
|
||||
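// tau is the unknown field on the full local grid (loc_I*loc_J*loc_K); the sweep kernels
// update it in place on the device.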
    // allocate tau
    print_CUDA_error_if_any(allocate_memory_on_device_cv((void**)&(grid_dv->tau), loc_I*loc_J*loc_K), 87);
}


void cuda_finalize_grid(Grid_on_device* grid_dv){
    // deallocate memory on device
    print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->n_nodes_on_levels), 10000);
    delete [] grid_dv->n_nodes_on_levels_host;
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__k___0), 1);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip1j__k___0), 2);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im1j__k___0), 3);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp1k___0), 4);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm1k___0), 5);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp1_0), 6);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km1_0), 7);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__k___1), 8);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip1j__k___1), 9);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im1j__k___1), 10);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp1k___1), 11);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm1k___1), 12);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp1_1), 13);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km1_1), 14);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__k___2), 15);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip1j__k___2), 16);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im1j__k___2), 17);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp1k___2), 18);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm1k___2), 19);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp1_2), 20);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km1_2), 21);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__k___3), 22);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip1j__k___3), 23);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im1j__k___3), 24);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp1k___3), 25);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm1k___3), 26);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp1_3), 27);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km1_3), 28);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__k___4), 29);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip1j__k___4), 30);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im1j__k___4), 31);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp1k___4), 32);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm1k___4), 33);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp1_4), 34);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km1_4), 35);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__k___5), 36);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip1j__k___5), 37);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im1j__k___5), 38);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp1k___5), 39);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm1k___5), 40);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp1_5), 41);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km1_5), 42);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__k___6), 43);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip1j__k___6), 44);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im1j__k___6), 45);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp1k___6), 46);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm1k___6), 47);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp1_6), 48);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km1_6), 49);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__k___7), 50);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip1j__k___7), 51);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im1j__k___7), 52);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp1k___7), 53);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm1k___7), 54);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp1_7), 55);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km1_7), 56);
|
||||
|
||||
if(grid_dv->if_3rd_order){
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip2j__k___0), 10008);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im2j__k___0), 10009);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp2k___0), 10010);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm2k___0), 10011);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp2_0), 10012);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km2_0), 10013);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip2j__k___1), 10008);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im2j__k___1), 10009);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp2k___1), 10010);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm2k___1), 10011);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp2_1), 10012);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km2_1), 10013);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip2j__k___2), 10008);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im2j__k___2), 10009);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp2k___2), 10010);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm2k___2), 10011);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp2_2), 10012);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km2_2), 10013);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip2j__k___3), 10008);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im2j__k___3), 10009);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp2k___3), 10010);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm2k___3), 10011);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp2_3), 10012);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km2_3), 10013);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip2j__k___4), 10008);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im2j__k___4), 10009);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp2k___4), 10010);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm2k___4), 10011);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp2_4), 10012);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km2_4), 10013);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip2j__k___5), 10008);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im2j__k___5), 10009);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp2k___5), 10010);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm2k___5), 10011);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp2_5), 10012);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km2_5), 10013);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip2j__k___6), 10008);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im2j__k___6), 10009);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp2k___6), 10010);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm2k___6), 10011);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp2_6), 10012);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km2_6), 10013);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_ip2j__k___7), 10008);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_im2j__k___7), 10009);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jp2k___7), 10010);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__jm2k___7), 10011);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__kp2_7), 10012);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_i(grid_dv->vv_i__j__km2_7), 10013);
|
||||
}
|
||||
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_a_0), 10057);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_b_0), 10058);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_c_0), 10059);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_f_0), 10060);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0v_0), 10061);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0r_0), 10062);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0t_0), 10063);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0p_0), 10064);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fun_0), 10065);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_bl(grid_dv->vv_change_0), 10066);
|
||||
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_a_1), 10067);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_b_1), 10068);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_c_1), 10069);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_f_1), 10070);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0v_1), 10071);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0r_1), 10072);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0t_1), 10073);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0p_1), 10074);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fun_1), 10075);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_bl(grid_dv->vv_change_1), 10076);
|
||||
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_a_2), 10077);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_b_2), 10078);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_c_2), 10079);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_f_2), 10080);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0v_2), 10081);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0r_2), 10082);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0t_2), 10083);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0p_2), 10084);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fun_2), 10085);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_bl(grid_dv->vv_change_2), 10086);
|
||||
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_a_3), 10077);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_b_3), 10078);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_c_3), 10079);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_f_3), 10080);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0v_3), 10081);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0r_3), 10082);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0t_3), 10083);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0p_3), 10084);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fun_3), 10085);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_bl(grid_dv->vv_change_3), 10086);
|
||||
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_a_4), 10077);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_b_4), 10078);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_c_4), 10079);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_f_4), 10080);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0v_4), 10081);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0r_4), 10082);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0t_4), 10083);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0p_4), 10084);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fun_4), 10085);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_bl(grid_dv->vv_change_4), 10086);
|
||||
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_a_5), 10077);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_b_5), 10078);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_c_5), 10079);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_f_5), 10080);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0v_5), 10081);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0r_5), 10082);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0t_5), 10083);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0p_5), 10084);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fun_5), 10085);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_bl(grid_dv->vv_change_5), 10086);
|
||||
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_a_6), 10077);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_b_6), 10078);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_c_6), 10079);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_f_6), 10080);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0v_6), 10081);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0r_6), 10082);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0t_6), 10083);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0p_6), 10084);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fun_6), 10085);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_bl(grid_dv->vv_change_6), 10086);
|
||||
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_a_7), 10077);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_b_7), 10078);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_c_7), 10079);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fac_f_7), 10080);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0v_7), 10081);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0r_7), 10082);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0t_7), 10083);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_T0p_7), 10084);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_cv( grid_dv->vv_fun_7), 10085);
|
||||
print_CUDA_error_if_any(deallocate_memory_on_device_bl(grid_dv->vv_change_7), 10086);
|
||||
|
||||
    print_CUDA_error_if_any(deallocate_memory_on_device_cv(grid_dv->tau), 10087);
}


// copy tau from host to device
void cuda_copy_tau_to_device(Grid_on_device* grid_dv, CUSTOMREAL* tau_h){
    print_CUDA_error_if_any(copy_host_to_device_cv(grid_dv->tau, tau_h, grid_dv->loc_I_host*grid_dv->loc_J_host*grid_dv->loc_K_host), 10087);
}


// copy tau from device to host
void cuda_copy_tau_to_host(Grid_on_device* grid_dv, CUSTOMREAL* tau_h){
    print_CUDA_error_if_any(copy_device_to_host_cv(tau_h, grid_dv->tau, grid_dv->loc_I_host*grid_dv->loc_J_host*grid_dv->loc_K_host), 10088);
}
|
||||
|
||||
123
cuda/grid_wrapper.cuh
Normal file
@@ -0,0 +1,123 @@
|
||||
#ifndef GRID_WRAPPER_CUH
#define GRID_WRAPPER_CUH

#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cuda.h>

#include <vector>
#include <iostream>

//#include "config.h"
#include "cuda_constants.cuh"
#include "cuda_utils.cuh"
|
||||
|
||||
// structure for storing grid information on device
|
||||
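// Note on naming: members with the *_host suffix live in host memory, all other pointers are
// device memory. The _0 .. _7 suffixes hold one copy of each index/coefficient array per node
// group used by the level-based sweeping (presumably the eight sweep orderings of the 3-D
// fast sweeping method).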
typedef struct Grid_on_device {
|
||||
|
||||
// parameters
|
||||
int loc_I_host, loc_J_host, loc_K_host;
|
||||
int n_nodes_total_host;
|
||||
int n_nodes_max_host=0;
|
||||
int n_levels_host;
|
||||
CUSTOMREAL dr_host, dt_host, dp_host;
|
||||
|
||||
// index storage
|
||||
int* n_nodes_on_levels, *n_nodes_on_levels_host;
|
||||
|
||||
int* vv_i__j__k___0, *vv_i__j__k___1, *vv_i__j__k___2, *vv_i__j__k___3, *vv_i__j__k___4, *vv_i__j__k___5, *vv_i__j__k___6, *vv_i__j__k___7;
|
||||
int* vv_ip1j__k___0, *vv_ip1j__k___1, *vv_ip1j__k___2, *vv_ip1j__k___3, *vv_ip1j__k___4, *vv_ip1j__k___5, *vv_ip1j__k___6, *vv_ip1j__k___7;
|
||||
int* vv_im1j__k___0, *vv_im1j__k___1, *vv_im1j__k___2, *vv_im1j__k___3, *vv_im1j__k___4, *vv_im1j__k___5, *vv_im1j__k___6, *vv_im1j__k___7;
|
||||
int* vv_i__jp1k___0, *vv_i__jp1k___1, *vv_i__jp1k___2, *vv_i__jp1k___3, *vv_i__jp1k___4, *vv_i__jp1k___5, *vv_i__jp1k___6, *vv_i__jp1k___7;
|
||||
int* vv_i__jm1k___0, *vv_i__jm1k___1, *vv_i__jm1k___2, *vv_i__jm1k___3, *vv_i__jm1k___4, *vv_i__jm1k___5, *vv_i__jm1k___6, *vv_i__jm1k___7;
|
||||
int* vv_i__j__kp1_0, *vv_i__j__kp1_1, *vv_i__j__kp1_2, *vv_i__j__kp1_3, *vv_i__j__kp1_4, *vv_i__j__kp1_5, *vv_i__j__kp1_6, *vv_i__j__kp1_7;
|
||||
int* vv_i__j__km1_0, *vv_i__j__km1_1, *vv_i__j__km1_2, *vv_i__j__km1_3, *vv_i__j__km1_4, *vv_i__j__km1_5, *vv_i__j__km1_6, *vv_i__j__km1_7;
|
||||
int* vv_ip2j__k___0, *vv_ip2j__k___1, *vv_ip2j__k___2, *vv_ip2j__k___3, *vv_ip2j__k___4, *vv_ip2j__k___5, *vv_ip2j__k___6, *vv_ip2j__k___7;
|
||||
int* vv_im2j__k___0, *vv_im2j__k___1, *vv_im2j__k___2, *vv_im2j__k___3, *vv_im2j__k___4, *vv_im2j__k___5, *vv_im2j__k___6, *vv_im2j__k___7;
|
||||
int* vv_i__jp2k___0, *vv_i__jp2k___1, *vv_i__jp2k___2, *vv_i__jp2k___3, *vv_i__jp2k___4, *vv_i__jp2k___5, *vv_i__jp2k___6, *vv_i__jp2k___7;
|
||||
int* vv_i__jm2k___0, *vv_i__jm2k___1, *vv_i__jm2k___2, *vv_i__jm2k___3, *vv_i__jm2k___4, *vv_i__jm2k___5, *vv_i__jm2k___6, *vv_i__jm2k___7;
|
||||
int* vv_i__j__kp2_0, *vv_i__j__kp2_1, *vv_i__j__kp2_2, *vv_i__j__kp2_3, *vv_i__j__kp2_4, *vv_i__j__kp2_5, *vv_i__j__kp2_6, *vv_i__j__kp2_7;
|
||||
int* vv_i__j__km2_0, *vv_i__j__km2_1, *vv_i__j__km2_2, *vv_i__j__km2_3, *vv_i__j__km2_4, *vv_i__j__km2_5, *vv_i__j__km2_6, *vv_i__j__km2_7;
|
||||
|
||||
// constants
|
||||
CUSTOMREAL* vv_fac_a_0, *vv_fac_a_1, *vv_fac_a_2, *vv_fac_a_3, *vv_fac_a_4, *vv_fac_a_5, *vv_fac_a_6, *vv_fac_a_7;
|
||||
CUSTOMREAL* vv_fac_b_0, *vv_fac_b_1, *vv_fac_b_2, *vv_fac_b_3, *vv_fac_b_4, *vv_fac_b_5, *vv_fac_b_6, *vv_fac_b_7;
|
||||
CUSTOMREAL* vv_fac_c_0, *vv_fac_c_1, *vv_fac_c_2, *vv_fac_c_3, *vv_fac_c_4, *vv_fac_c_5, *vv_fac_c_6, *vv_fac_c_7;
|
||||
CUSTOMREAL* vv_fac_f_0, *vv_fac_f_1, *vv_fac_f_2, *vv_fac_f_3, *vv_fac_f_4, *vv_fac_f_5, *vv_fac_f_6, *vv_fac_f_7;
|
||||
CUSTOMREAL* vv_T0v_0, *vv_T0v_1, *vv_T0v_2, *vv_T0v_3, *vv_T0v_4, *vv_T0v_5, *vv_T0v_6, *vv_T0v_7;
|
||||
CUSTOMREAL* vv_T0r_0, *vv_T0r_1, *vv_T0r_2, *vv_T0r_3, *vv_T0r_4, *vv_T0r_5, *vv_T0r_6, *vv_T0r_7;
|
||||
CUSTOMREAL* vv_T0t_0, *vv_T0t_1, *vv_T0t_2, *vv_T0t_3, *vv_T0t_4, *vv_T0t_5, *vv_T0t_6, *vv_T0t_7;
|
||||
CUSTOMREAL* vv_T0p_0, *vv_T0p_1, *vv_T0p_2, *vv_T0p_3, *vv_T0p_4, *vv_T0p_5, *vv_T0p_6, *vv_T0p_7;
|
||||
CUSTOMREAL* vv_fun_0, *vv_fun_1, *vv_fun_2, *vv_fun_3, *vv_fun_4, *vv_fun_5, *vv_fun_6, *vv_fun_7;
|
||||
bool* vv_change_0, *vv_change_1, *vv_change_2, *vv_change_3, *vv_change_4, *vv_change_5, *vv_change_6, *vv_change_7;
|
||||
|
||||
// temporary variables
|
||||
CUSTOMREAL* tau;
|
||||
|
||||
bool if_3rd_order = false;
|
||||
|
||||
// thread and grid configuration for sweeping
|
||||
dim3 grid_sweep_host, threads_sweep_host;
|
||||
// array of streams
|
||||
cudaStream_t* level_streams;
|
||||
|
||||
|
||||
} Grid_on_device;
|
||||
|
||||
|
||||
void cuda_initialize_grid_1st(std::vector< std::vector<int> >& ijk, Grid_on_device* grid_dv, int const& loc_I, int const& loc_J, int const& loc_K,
|
||||
CUSTOMREAL const& dp, CUSTOMREAL const& dt, CUSTOMREAL const& dr, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_ip1j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_im1j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jp1k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jm1k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__kp1, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__km1, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_a, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_b, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_c, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_f, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0v, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0r, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0t, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0p, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fun, \
|
||||
std::vector<std::vector<bool*>> & vv_change);
|
||||
|
||||
void cuda_initialize_grid_3rd(std::vector< std::vector<int> >& ijk, Grid_on_device* grid_dv, int const& loc_I, int const& loc_J, int const& loc_K,
|
||||
CUSTOMREAL const& dp, CUSTOMREAL const& dt, CUSTOMREAL const& dr, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_ip1j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_im1j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jp1k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jm1k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__kp1, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__km1, \
|
||||
std::vector<std::vector<int*>> & vv_ip2j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_im2j__k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jp2k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__jm2k__, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__kp2, \
|
||||
std::vector<std::vector<int*>> & vv_i__j__km2, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_a, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_b, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_c, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fac_f, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0v, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0r, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0t, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_T0p, \
|
||||
std::vector<std::vector<CUSTOMREAL*>> & vv_fun, \
|
||||
std::vector<std::vector<bool*>> & vv_change);
|
||||
|
||||
|
||||
// finalize
|
||||
void cuda_finalize_grid(Grid_on_device* grid_dv);
|
||||
|
||||
// copy tau from host to device
|
||||
void cuda_copy_tau_to_device(Grid_on_device* grid_dv, CUSTOMREAL* tau_h);
|
||||
// copy tau from device to host
|
||||
void cuda_copy_tau_to_host(Grid_on_device* grid_dv, CUSTOMREAL* tau_h);
|
||||
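// Typical call sequence from the host side (a sketch only; the actual driver code lives
// elsewhere in the repository and may differ):
//   Grid_on_device grid_dv;
//   cuda_initialize_grid_1st(ijk, &grid_dv, loc_I, loc_J, loc_K, dp, dt, dr, ...); // once per subdomain
//   cuda_copy_tau_to_device(&grid_dv, tau_h);   // upload the current tau field
//   /* ... launch the sweep kernels ... */
//   cuda_copy_tau_to_host(&grid_dv, tau_h);     // download the updated tau
//   cuda_finalize_grid(&grid_dv);               // release device memory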
|
||||
#endif // GRID_WRAPPER_CUH
|
||||
883
cuda/iterator_wrapper.cu
Normal file
@@ -0,0 +1,883 @@
|
||||
#include "iterator_wrapper.cuh"
|
||||
|
||||
__device__ const CUSTOMREAL PLUS = 1.0;
|
||||
__device__ const CUSTOMREAL MINUS = -1.0;
|
||||
__device__ const CUSTOMREAL v_eps = 1e-12;
|
||||
|
||||
__device__ const CUSTOMREAL _0_5_CR = 0.5;
|
||||
__device__ const CUSTOMREAL _1_CR = 1.0;
|
||||
__device__ const CUSTOMREAL _2_CR = 2.0;
|
||||
__device__ const CUSTOMREAL _3_CR = 3.0;
|
||||
__device__ const CUSTOMREAL _4_CR = 4.0;
|
||||
|
||||
__device__ CUSTOMREAL my_square_cu(CUSTOMREAL const& x) {
|
||||
return x*x;
|
||||
}
|
||||
|
||||
__device__ CUSTOMREAL calc_stencil_1st(CUSTOMREAL const& a, CUSTOMREAL const& b, CUSTOMREAL const& Dinv){
|
||||
return Dinv*(a-b);
|
||||
}
|
||||
|
||||
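// Third-order one-sided difference with a WENO-type nonlinear weight: ww blends the plain
// first-order difference (b-d)*Dinv_half with the biased three-point stencil
// (-3a+4b-c)*Dinv_half, using the squared curvature ratio tmp1/tmp2 as a smoothness indicator
// (v_eps guards against division by zero); sign selects the upwind direction.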
__device__ CUSTOMREAL calc_stencil_3rd(CUSTOMREAL const& a, CUSTOMREAL const& b, CUSTOMREAL const& c, CUSTOMREAL const& d, CUSTOMREAL const& Dinv_half, CUSTOMREAL const& sign){
    CUSTOMREAL tmp1 = v_eps + my_square_cu(a-_2_CR*b+c);
    CUSTOMREAL tmp2 = v_eps + my_square_cu(d-_2_CR*a+b);
    CUSTOMREAL ww   = _1_CR/(_1_CR+_2_CR*my_square_cu(tmp1/tmp2));
    return sign*((_1_CR-ww)* (b-d)*Dinv_half + ww*(-_3_CR*a+_4_CR*b-c)*Dinv_half);
}

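// The function below evaluates the Hamiltonian used in the Lax-Friedrichs update for
// T = T0 * tau, taking the average of the two one-sided differences as the gradient of tau
// in each direction, e.g. dtau/dr ~ (pr1+pr2)/2:
//   H = sqrt( fac_a*(T0r*tau + T0v*dtau_r)^2 + fac_b*(T0t*tau + T0v*dtau_t)^2
//           + fac_c*(T0p*tau + T0v*dtau_p)^2
//           - 2*fac_f*(T0t*tau + T0v*dtau_t)*(T0p*tau + T0v*dtau_p) )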
__device__ CUSTOMREAL cuda_calc_LF_Hamiltonian( \
|
||||
CUSTOMREAL const& fac_a_, \
|
||||
CUSTOMREAL const& fac_b_, \
|
||||
CUSTOMREAL const& fac_c_, \
|
||||
CUSTOMREAL const& fac_f_, \
|
||||
CUSTOMREAL const& T0r_, \
|
||||
CUSTOMREAL const& T0t_, \
|
||||
CUSTOMREAL const& T0p_, \
|
||||
CUSTOMREAL const& T0v_, \
|
||||
CUSTOMREAL& tau_, \
|
||||
CUSTOMREAL const& pp1, CUSTOMREAL& pp2, \
|
||||
CUSTOMREAL const& pt1, CUSTOMREAL& pt2, \
|
||||
CUSTOMREAL const& pr1, CUSTOMREAL& pr2 \
|
||||
) {
|
||||
// LF Hamiltonian for T = T0 * tau
|
||||
return sqrt(
|
||||
fac_a_ * my_square_cu(T0r_ * tau_ + T0v_ * (pr1+pr2)/_2_CR) \
|
||||
+ fac_b_ * my_square_cu(T0t_ * tau_ + T0v_ * (pt1+pt2)/_2_CR) \
|
||||
+ fac_c_ * my_square_cu(T0p_ * tau_ + T0v_ * (pp1+pp2)/_2_CR) \
|
||||
- _2_CR*fac_f_ * (T0t_ * tau_ + T0v_ * (pt1+pt2)/_2_CR) \
|
||||
* (T0p_ * tau_ + T0v_ * (pp1+pp2)/_2_CR) \
|
||||
);
|
||||
}
|
||||
|
||||
__global__ void cuda_do_sweep_level_kernel_1st(\
|
||||
const int i__j__k__[],\
|
||||
const int ip1j__k__[],\
|
||||
const int im1j__k__[],\
|
||||
const int i__jp1k__[],\
|
||||
const int i__jm1k__[],\
|
||||
const int i__j__kp1[],\
|
||||
const int i__j__km1[],\
|
||||
const CUSTOMREAL fac_a[], \
|
||||
const CUSTOMREAL fac_b[], \
|
||||
const CUSTOMREAL fac_c[], \
|
||||
const CUSTOMREAL fac_f[], \
|
||||
const CUSTOMREAL T0v[], \
|
||||
const CUSTOMREAL T0r[], \
|
||||
const CUSTOMREAL T0t[], \
|
||||
const CUSTOMREAL T0p[], \
|
||||
const CUSTOMREAL fun[], \
|
||||
const bool changed[], \
|
||||
CUSTOMREAL tau[], \
|
||||
const int loc_I, \
|
||||
const int loc_J, \
|
||||
const int loc_K, \
|
||||
const CUSTOMREAL dr, \
|
||||
const CUSTOMREAL dt, \
|
||||
const CUSTOMREAL dp, \
|
||||
const int n_nodes_this_level, \
|
||||
const int i_start \
|
||||
){
|
||||
|
||||
unsigned int i_node = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
|
||||
|
||||
if (i_node >= n_nodes_this_level) return;
|
||||
|
||||
i_node += i_start;
|
||||
|
||||
//if (i_node >= loc_I*loc_J*loc_K) return;
|
||||
|
||||
if (changed[i_node] != true) return;
|
||||
|
||||
CUSTOMREAL sigr = _1_CR*sqrt(fac_a[i_node])*T0v[i_node];
|
||||
CUSTOMREAL sigt = _1_CR*sqrt(fac_b[i_node])*T0v[i_node];
|
||||
CUSTOMREAL sigp = _1_CR*sqrt(fac_c[i_node])*T0v[i_node];
|
||||
CUSTOMREAL coe = _1_CR/((sigr/dr)+(sigt/dt)+(sigp/dp));
|
||||
|
||||
CUSTOMREAL pp1 = calc_stencil_1st(tau[i__j__k__[i_node]],tau[im1j__k__[i_node]], _1_CR/dp);
|
||||
CUSTOMREAL pp2 = calc_stencil_1st(tau[ip1j__k__[i_node]],tau[i__j__k__[i_node]], _1_CR/dp);
|
||||
|
||||
CUSTOMREAL pt1 = calc_stencil_1st(tau[i__j__k__[i_node]],tau[i__jm1k__[i_node]], _1_CR/dt);
|
||||
CUSTOMREAL pt2 = calc_stencil_1st(tau[i__jp1k__[i_node]],tau[i__j__k__[i_node]], _1_CR/dt);
|
||||
|
||||
CUSTOMREAL pr1 = calc_stencil_1st(tau[i__j__k__[i_node]],tau[i__j__km1[i_node]], _1_CR/dr);
|
||||
CUSTOMREAL pr2 = calc_stencil_1st(tau[i__j__kp1[i_node]],tau[i__j__k__[i_node]], _1_CR/dr);
|
||||
|
||||
// LF Hamiltonian
|
||||
CUSTOMREAL Htau = cuda_calc_LF_Hamiltonian(\
|
||||
fac_a[i_node], \
|
||||
fac_b[i_node], \
|
||||
fac_c[i_node], \
|
||||
fac_f[i_node], \
|
||||
T0r[i_node], \
|
||||
T0t[i_node], \
|
||||
T0p[i_node], \
|
||||
T0v[i_node], \
|
||||
tau[i__j__k__[i_node]], \
|
||||
pp1, pp2, pt1, pt2, pr1, pr2);
|
||||
|
||||
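// Lax-Friedrichs sweeping update: relax tau toward the solution of H(tau) = fun, with the
// sig*(p2-p1)/2 terms acting as artificial viscosity; coe normalizes by the sum of sig/dx.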
tau[i__j__k__[i_node]] += coe*((fun[i_node] - Htau) \
|
||||
+(sigr*(pr2-pr1) \
|
||||
+ sigt*(pt2-pt1) \
|
||||
+ sigp*(pp2-pp1))/_2_CR);
|
||||
|
||||
}
|
||||
|
||||
__global__ void cuda_do_sweep_level_kernel_3rd(\
|
||||
const int i__j__k__[],\
|
||||
const int ip1j__k__[],\
|
||||
const int im1j__k__[],\
|
||||
const int i__jp1k__[],\
|
||||
const int i__jm1k__[],\
|
||||
const int i__j__kp1[],\
|
||||
const int i__j__km1[],\
|
||||
const int ip2j__k__[],\
|
||||
const int im2j__k__[],\
|
||||
const int i__jp2k__[],\
|
||||
const int i__jm2k__[],\
|
||||
const int i__j__kp2[],\
|
||||
const int i__j__km2[],\
|
||||
const CUSTOMREAL fac_a[], \
|
||||
const CUSTOMREAL fac_b[], \
|
||||
const CUSTOMREAL fac_c[], \
|
||||
const CUSTOMREAL fac_f[], \
|
||||
const CUSTOMREAL T0v[], \
|
||||
const CUSTOMREAL T0r[], \
|
||||
const CUSTOMREAL T0t[], \
|
||||
const CUSTOMREAL T0p[], \
|
||||
const CUSTOMREAL fun[], \
|
||||
const bool changed[], \
|
||||
CUSTOMREAL tau[], \
|
||||
const int loc_I, \
|
||||
const int loc_J, \
|
||||
const int loc_K, \
|
||||
const CUSTOMREAL dr, \
|
||||
const CUSTOMREAL dt, \
|
||||
const CUSTOMREAL dp, \
|
||||
const int n_nodes_this_level, \
|
||||
const int i_start \
|
||||
){
|
||||
|
||||
CUSTOMREAL pp1, pp2, pt1, pt2, pr1, pr2;
|
||||
|
||||
unsigned int i_node = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
|
||||
|
||||
if (i_node >= n_nodes_this_level) return;
|
||||
|
||||
i_node += i_start;
|
||||
//if (i_node >= loc_I*loc_J*loc_K) return;
|
||||
|
||||
if (changed[i_node] != true) return;
|
||||
|
||||
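// recover the 3D indices (i, j, k) from the flattened node index i + j*loc_I + k*loc_I*loc_J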
int k = i__j__k__[i_node] / (loc_I*loc_J);
|
||||
int j = (i__j__k__[i_node] - k*loc_I*loc_J)/loc_I;
|
||||
int i = i__j__k__[i_node] - k*loc_I*loc_J - j*loc_I;
|
||||
|
||||
|
||||
CUSTOMREAL DRinv = _1_CR/dr;
|
||||
CUSTOMREAL DTinv = _1_CR/dt;
|
||||
CUSTOMREAL DPinv = _1_CR/dp;
|
||||
CUSTOMREAL DRinv_half = DRinv*_0_5_CR;
|
||||
CUSTOMREAL DTinv_half = DTinv*_0_5_CR;
|
||||
CUSTOMREAL DPinv_half = DPinv*_0_5_CR;
|
||||
|
||||
CUSTOMREAL sigr = _1_CR*sqrt(fac_a[i_node])*T0v[i_node];
|
||||
CUSTOMREAL sigt = _1_CR*sqrt(fac_b[i_node])*T0v[i_node];
|
||||
CUSTOMREAL sigp = _1_CR*sqrt(fac_c[i_node])*T0v[i_node];
|
||||
CUSTOMREAL coe = _1_CR/((sigr/dr)+(sigt/dt)+(sigp/dp));
|
||||
|
||||
// direction p
|
||||
if (i == 1) {
|
||||
pp1 = calc_stencil_1st(tau[i__j__k__[i_node]],tau[im1j__k__[i_node]],DPinv);
|
||||
pp2 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[ip1j__k__[i_node]],tau[ip2j__k__[i_node]],tau[im1j__k__[i_node]],DPinv_half, PLUS);
|
||||
} else if (i == loc_I-2) {
|
||||
pp1 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[im1j__k__[i_node]],tau[im2j__k__[i_node]],tau[ip1j__k__[i_node]],DPinv_half, MINUS);
|
||||
pp2 = calc_stencil_1st(tau[ip1j__k__[i_node]],tau[i__j__k__[i_node]],DPinv);
|
||||
} else {
|
||||
pp1 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[im1j__k__[i_node]],tau[im2j__k__[i_node]],tau[ip1j__k__[i_node]],DPinv_half, MINUS);
|
||||
pp2 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[ip1j__k__[i_node]],tau[ip2j__k__[i_node]],tau[im1j__k__[i_node]],DPinv_half, PLUS);
|
||||
}
|
||||
|
||||
// direction t
|
||||
if (j == 1) {
|
||||
pt1 = calc_stencil_1st(tau[i__j__k__[i_node]],tau[i__jm1k__[i_node]],DTinv);
|
||||
pt2 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[i__jp1k__[i_node]],tau[i__jp2k__[i_node]],tau[i__jm1k__[i_node]],DTinv_half, PLUS);
|
||||
} else if (j == loc_J-2) {
|
||||
pt1 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[i__jm1k__[i_node]],tau[i__jm2k__[i_node]],tau[i__jp1k__[i_node]],DTinv_half, MINUS);
|
||||
pt2 = calc_stencil_1st(tau[i__jp1k__[i_node]],tau[i__j__k__[i_node]],DTinv);
|
||||
} else {
|
||||
pt1 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[i__jm1k__[i_node]],tau[i__jm2k__[i_node]],tau[i__jp1k__[i_node]],DTinv_half, MINUS);
|
||||
pt2 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[i__jp1k__[i_node]],tau[i__jp2k__[i_node]],tau[i__jm1k__[i_node]],DTinv_half, PLUS);
|
||||
}
|
||||
|
||||
// direction r
|
||||
if (k == 1) {
|
||||
pr1 = calc_stencil_1st(tau[i__j__k__[i_node]],tau[i__j__km1[i_node]],DRinv);
|
||||
pr2 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[i__j__kp1[i_node]],tau[i__j__kp2[i_node]],tau[i__j__km1[i_node]],DRinv_half, PLUS);
|
||||
} else if (k == loc_K-2) {
|
||||
pr1 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[i__j__km1[i_node]],tau[i__j__km2[i_node]],tau[i__j__kp1[i_node]],DRinv_half, MINUS);
|
||||
pr2 = calc_stencil_1st(tau[i__j__kp1[i_node]],tau[i__j__k__[i_node]],DRinv);
|
||||
} else {
|
||||
pr1 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[i__j__km1[i_node]],tau[i__j__km2[i_node]],tau[i__j__kp1[i_node]],DRinv_half, MINUS);
|
||||
pr2 = calc_stencil_3rd(tau[i__j__k__[i_node]],tau[i__j__kp1[i_node]],tau[i__j__kp2[i_node]],tau[i__j__km1[i_node]],DRinv_half, PLUS);
|
||||
}
|
||||
|
||||
CUSTOMREAL Htau = cuda_calc_LF_Hamiltonian(\
|
||||
fac_a[i_node], \
|
||||
fac_b[i_node], \
|
||||
fac_c[i_node], \
|
||||
fac_f[i_node], \
|
||||
T0r[i_node], \
|
||||
T0t[i_node], \
|
||||
T0p[i_node], \
|
||||
T0v[i_node], \
|
||||
tau[i__j__k__[i_node]], \
|
||||
pp1, pp2, pt1, pt2, pr1, pr2);
|
||||
|
||||
tau[i__j__k__[i_node]] += coe*((fun[i_node] - Htau) \
|
||||
+(sigr*(pr2-pr1) \
|
||||
+ sigt*(pt2-pt1) \
|
||||
+ sigp*(pp2-pp1))/_2_CR);
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
void initialize_sweep_params(Grid_on_device* grid_dv){
|
||||
|
||||
// check the numBlockPerSm and set the block size accordingly
|
||||
//int numBlocksPerSm = 0;
|
||||
//int block_size = CUDA_SWEEPING_BLOCK_SIZE;
|
||||
|
||||
//int device;
|
||||
//cudaGetDevice(&device);
|
||||
|
||||
//cudaDeviceProp deviceProp;
|
||||
//cudaGetDeviceProperties(&deviceProp, device);
|
||||
//if(grid_dv->if_3rd_order)
|
||||
// cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, cuda_do_sweep_level_kernel_3rd, CUDA_SWEEPING_BLOCK_SIZE, 0);
|
||||
//else
|
||||
// cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, cuda_do_sweep_level_kernel_1st, CUDA_SWEEPING_BLOCK_SIZE, 0);
|
||||
|
||||
//int max_cooperative_blocks = deviceProp.multiProcessorCount*numBlocksPerSm;
|
||||
|
||||
//grid_dv->threads_sweep_host = dim3(block_size, 1, 1);
|
||||
//grid_dv->grid_sweep_host = dim3(max_cooperative_blocks, 1, 1);
|
||||
|
||||
// spawn streams
|
||||
//grid_dv->level_streams = (cudaStream_t*)malloc(CUDA_MAX_NUM_STREAMS*sizeof(cudaStream_t));
|
||||
//for (int i = 0; i < CUDA_MAX_NUM_STREAMS; i++) {
|
||||
grid_dv->level_streams = (cudaStream_t*)malloc(grid_dv->n_levels_host*sizeof(cudaStream_t));
|
||||
for (int i = 0; i < grid_dv->n_levels_host; i++) {
|
||||
//cudaStreamCreate(&(grid_dv->level_streams[i]));
|
||||
// add null
|
||||
//cudaStreamCreateWithFlags(&(grid_dv->level_streams[i]), cudaStreamNonBlocking);
|
||||
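// NOTE: the default (null) stream is used for every level; per-level stream creation is kept commented out above for future use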
grid_dv->level_streams[i] = nullptr;
|
||||
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
void finalize_sweep_params(Grid_on_device* grid_on_dv){
|
||||
// destroy streams
|
||||
//for (int i = 0; i < CUDA_MAX_NUM_STREAMS; i++) {
|
||||
//for (int i = 0; i < grid_on_dv->n_levels_host; i++) {
|
||||
// cudaStreamDestroy(grid_on_dv->level_streams[i]);
|
||||
//}
|
||||
|
||||
free(grid_on_dv->level_streams);
|
||||
}
|
||||
|
||||
|
||||
void run_kernel(Grid_on_device* grid_dv, int const& iswp, int& i_node_offset, int const& i_level, \
|
||||
dim3& grid_each, dim3& threads_each, int& n_nodes_this_level){
|
||||
|
||||
int id_stream = i_level;// % CUDA_MAX_NUM_STREAMS;
|
||||
|
||||
if (grid_dv->if_3rd_order) {
|
||||
if (iswp == 0){
|
||||
void *kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___0), \
|
||||
&(grid_dv->vv_ip1j__k___0), \
|
||||
&(grid_dv->vv_im1j__k___0), \
|
||||
&(grid_dv->vv_i__jp1k___0), \
|
||||
&(grid_dv->vv_i__jm1k___0), \
|
||||
&(grid_dv->vv_i__j__kp1_0), \
|
||||
&(grid_dv->vv_i__j__km1_0), \
|
||||
&(grid_dv->vv_ip2j__k___0), \
|
||||
&(grid_dv->vv_im2j__k___0), \
|
||||
&(grid_dv->vv_i__jp2k___0), \
|
||||
&(grid_dv->vv_i__jm2k___0), \
|
||||
&(grid_dv->vv_i__j__kp2_0), \
|
||||
&(grid_dv->vv_i__j__km2_0), \
|
||||
&(grid_dv->vv_fac_a_0 ), \
|
||||
&(grid_dv->vv_fac_b_0 ), \
|
||||
&(grid_dv->vv_fac_c_0 ), \
|
||||
&(grid_dv->vv_fac_f_0 ), \
|
||||
&(grid_dv->vv_T0v_0 ), \
|
||||
&(grid_dv->vv_T0r_0 ), \
|
||||
&(grid_dv->vv_T0t_0 ), \
|
||||
&(grid_dv->vv_T0p_0 ), \
|
||||
&(grid_dv->vv_fun_0 ), \
|
||||
&(grid_dv->vv_change_0 ), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
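// launch the 3rd-order sweeping kernel for this level using the index arrays of sweep direction 0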
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_3rd, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
} else if (iswp == 1){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___1), \
|
||||
&(grid_dv->vv_i__jp1k___1), \
|
||||
&(grid_dv->vv_i__jm1k___1), \
|
||||
&(grid_dv->vv_i__j__kp1_1), \
|
||||
&(grid_dv->vv_i__j__km1_1), \
|
||||
&(grid_dv->vv_ip1j__k___1), \
|
||||
&(grid_dv->vv_im1j__k___1), \
|
||||
&(grid_dv->vv_ip2j__k___1), \
|
||||
&(grid_dv->vv_im2j__k___1), \
|
||||
&(grid_dv->vv_i__jp2k___1), \
|
||||
&(grid_dv->vv_i__jm2k___1), \
|
||||
&(grid_dv->vv_i__j__kp2_1), \
|
||||
&(grid_dv->vv_i__j__km2_1), \
|
||||
&(grid_dv->vv_fac_a_1 ), \
|
||||
&(grid_dv->vv_fac_b_1 ), \
|
||||
&(grid_dv->vv_fac_c_1 ), \
|
||||
&(grid_dv->vv_fac_f_1 ), \
|
||||
&(grid_dv->vv_T0v_1 ), \
|
||||
&(grid_dv->vv_T0r_1 ), \
|
||||
&(grid_dv->vv_T0t_1 ), \
|
||||
&(grid_dv->vv_T0p_1 ), \
|
||||
&(grid_dv->vv_fun_1 ), \
|
||||
&(grid_dv->vv_change_1 ), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_3rd, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
} else if (iswp == 2){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___2), \
|
||||
&(grid_dv->vv_i__j__kp1_2), \
|
||||
&(grid_dv->vv_i__j__km1_2), \
|
||||
&(grid_dv->vv_ip1j__k___2), \
|
||||
&(grid_dv->vv_im1j__k___2), \
|
||||
&(grid_dv->vv_i__jp1k___2), \
|
||||
&(grid_dv->vv_i__jm1k___2), \
|
||||
&(grid_dv->vv_ip2j__k___2), \
|
||||
&(grid_dv->vv_im2j__k___2), \
|
||||
&(grid_dv->vv_i__jp2k___2), \
|
||||
&(grid_dv->vv_i__jm2k___2), \
|
||||
&(grid_dv->vv_i__j__kp2_2), \
|
||||
&(grid_dv->vv_i__j__km2_2), \
|
||||
&(grid_dv->vv_fac_a_2), \
|
||||
&(grid_dv->vv_fac_b_2), \
|
||||
&(grid_dv->vv_fac_c_2), \
|
||||
&(grid_dv->vv_fac_f_2), \
|
||||
&(grid_dv->vv_T0v_2), \
|
||||
&(grid_dv->vv_T0r_2), \
|
||||
&(grid_dv->vv_T0t_2), \
|
||||
&(grid_dv->vv_T0p_2), \
|
||||
&(grid_dv->vv_fun_2), \
|
||||
&(grid_dv->vv_change_2), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_3rd, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
} else if (iswp == 3){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___3), \
|
||||
&(grid_dv->vv_ip1j__k___3), \
|
||||
&(grid_dv->vv_im1j__k___3), \
|
||||
&(grid_dv->vv_i__jp1k___3), \
|
||||
&(grid_dv->vv_i__jm1k___3), \
|
||||
&(grid_dv->vv_i__j__kp1_3), \
|
||||
&(grid_dv->vv_i__j__km1_3), \
|
||||
&(grid_dv->vv_ip2j__k___3), \
|
||||
&(grid_dv->vv_im2j__k___3), \
|
||||
&(grid_dv->vv_i__jp2k___3), \
|
||||
&(grid_dv->vv_i__jm2k___3), \
|
||||
&(grid_dv->vv_i__j__kp2_3), \
|
||||
&(grid_dv->vv_i__j__km2_3), \
|
||||
&(grid_dv->vv_fac_a_3), \
|
||||
&(grid_dv->vv_fac_b_3), \
|
||||
&(grid_dv->vv_fac_c_3), \
|
||||
&(grid_dv->vv_fac_f_3), \
|
||||
&(grid_dv->vv_T0v_3), \
|
||||
&(grid_dv->vv_T0r_3), \
|
||||
&(grid_dv->vv_T0t_3), \
|
||||
&(grid_dv->vv_T0p_3), \
|
||||
&(grid_dv->vv_fun_3), \
|
||||
&(grid_dv->vv_change_3), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_3rd, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
} else if (iswp == 4){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___4), \
|
||||
&(grid_dv->vv_ip1j__k___4), \
|
||||
&(grid_dv->vv_im1j__k___4), \
|
||||
&(grid_dv->vv_i__jp1k___4), \
|
||||
&(grid_dv->vv_i__jm1k___4), \
|
||||
&(grid_dv->vv_i__j__kp1_4), \
|
||||
&(grid_dv->vv_i__j__km1_4), \
|
||||
&(grid_dv->vv_ip2j__k___4), \
|
||||
&(grid_dv->vv_im2j__k___4), \
|
||||
&(grid_dv->vv_i__jp2k___4), \
|
||||
&(grid_dv->vv_i__jm2k___4), \
|
||||
&(grid_dv->vv_i__j__kp2_4), \
|
||||
&(grid_dv->vv_i__j__km2_4), \
|
||||
&(grid_dv->vv_fac_a_4), \
|
||||
&(grid_dv->vv_fac_b_4), \
|
||||
&(grid_dv->vv_fac_c_4), \
|
||||
&(grid_dv->vv_fac_f_4), \
|
||||
&(grid_dv->vv_T0v_4), \
|
||||
&(grid_dv->vv_T0r_4), \
|
||||
&(grid_dv->vv_T0t_4), \
|
||||
&(grid_dv->vv_T0p_4), \
|
||||
&(grid_dv->vv_fun_4), \
|
||||
&(grid_dv->vv_change_4), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_3rd, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
} else if (iswp == 5) {
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___5), \
|
||||
&(grid_dv->vv_ip1j__k___5), \
|
||||
&(grid_dv->vv_im1j__k___5), \
|
||||
&(grid_dv->vv_i__jp1k___5), \
|
||||
&(grid_dv->vv_i__jm1k___5), \
|
||||
&(grid_dv->vv_i__j__kp1_5), \
|
||||
&(grid_dv->vv_i__j__km1_5), \
|
||||
&(grid_dv->vv_ip2j__k___5), \
|
||||
&(grid_dv->vv_im2j__k___5), \
|
||||
&(grid_dv->vv_i__jp2k___5), \
|
||||
&(grid_dv->vv_i__jm2k___5), \
|
||||
&(grid_dv->vv_i__j__kp2_5), \
|
||||
&(grid_dv->vv_i__j__km2_5), \
|
||||
&(grid_dv->vv_fac_a_5), \
|
||||
&(grid_dv->vv_fac_b_5), \
|
||||
&(grid_dv->vv_fac_c_5), \
|
||||
&(grid_dv->vv_fac_f_5), \
|
||||
&(grid_dv->vv_T0v_5), \
|
||||
&(grid_dv->vv_T0r_5), \
|
||||
&(grid_dv->vv_T0t_5), \
|
||||
&(grid_dv->vv_T0p_5), \
|
||||
&(grid_dv->vv_fun_5), \
|
||||
&(grid_dv->vv_change_5), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_3rd, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
} else if (iswp == 6) {
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___6), \
|
||||
&(grid_dv->vv_ip1j__k___6), \
|
||||
&(grid_dv->vv_im1j__k___6), \
|
||||
&(grid_dv->vv_i__jp1k___6), \
|
||||
&(grid_dv->vv_i__jm1k___6), \
|
||||
&(grid_dv->vv_i__j__kp1_6), \
|
||||
&(grid_dv->vv_i__j__km1_6), \
|
||||
&(grid_dv->vv_ip2j__k___6), \
|
||||
&(grid_dv->vv_im2j__k___6), \
|
||||
&(grid_dv->vv_i__jp2k___6), \
|
||||
&(grid_dv->vv_i__jm2k___6), \
|
||||
&(grid_dv->vv_i__j__kp2_6), \
|
||||
&(grid_dv->vv_i__j__km2_6), \
|
||||
&(grid_dv->vv_fac_a_6), \
|
||||
&(grid_dv->vv_fac_b_6), \
|
||||
&(grid_dv->vv_fac_c_6), \
|
||||
&(grid_dv->vv_fac_f_6), \
|
||||
&(grid_dv->vv_T0v_6), \
|
||||
&(grid_dv->vv_T0r_6), \
|
||||
&(grid_dv->vv_T0t_6), \
|
||||
&(grid_dv->vv_T0p_6), \
|
||||
&(grid_dv->vv_fun_6), \
|
||||
&(grid_dv->vv_change_6), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_3rd, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
} else {
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___7), \
|
||||
&(grid_dv->vv_ip1j__k___7), \
|
||||
&(grid_dv->vv_im1j__k___7), \
|
||||
&(grid_dv->vv_i__jp1k___7), \
|
||||
&(grid_dv->vv_i__jm1k___7), \
|
||||
&(grid_dv->vv_i__j__kp1_7), \
|
||||
&(grid_dv->vv_i__j__km1_7), \
|
||||
&(grid_dv->vv_ip2j__k___7), \
|
||||
&(grid_dv->vv_im2j__k___7), \
|
||||
&(grid_dv->vv_i__jp2k___7), \
|
||||
&(grid_dv->vv_i__jm2k___7), \
|
||||
&(grid_dv->vv_i__j__kp2_7), \
|
||||
&(grid_dv->vv_i__j__km2_7), \
|
||||
&(grid_dv->vv_fac_a_7), \
|
||||
&(grid_dv->vv_fac_b_7), \
|
||||
&(grid_dv->vv_fac_c_7), \
|
||||
&(grid_dv->vv_fac_f_7), \
|
||||
&(grid_dv->vv_T0v_7), \
|
||||
&(grid_dv->vv_T0r_7), \
|
||||
&(grid_dv->vv_T0t_7), \
|
||||
&(grid_dv->vv_T0p_7), \
|
||||
&(grid_dv->vv_fun_7), \
|
||||
&(grid_dv->vv_change_7), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_3rd, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
}
|
||||
} else { // 1st order
|
||||
if (iswp == 0){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___0), \
|
||||
&(grid_dv->vv_ip1j__k___0), \
|
||||
&(grid_dv->vv_im1j__k___0), \
|
||||
&(grid_dv->vv_i__jp1k___0), \
|
||||
&(grid_dv->vv_i__jm1k___0), \
|
||||
&(grid_dv->vv_i__j__kp1_0), \
|
||||
&(grid_dv->vv_i__j__km1_0), \
|
||||
&(grid_dv->vv_fac_a_0), \
|
||||
&(grid_dv->vv_fac_b_0), \
|
||||
&(grid_dv->vv_fac_c_0), \
|
||||
&(grid_dv->vv_fac_f_0), \
|
||||
&(grid_dv->vv_T0v_0), \
|
||||
&(grid_dv->vv_T0r_0), \
|
||||
&(grid_dv->vv_T0t_0), \
|
||||
&(grid_dv->vv_T0p_0), \
|
||||
&(grid_dv->vv_fun_0), \
|
||||
&(grid_dv->vv_change_0), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_1st, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30000);
|
||||
|
||||
} else if (iswp == 1){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___1), \
|
||||
&(grid_dv->vv_i__jp1k___1), \
|
||||
&(grid_dv->vv_i__jm1k___1), \
|
||||
&(grid_dv->vv_i__j__kp1_1), \
|
||||
&(grid_dv->vv_i__j__km1_1), \
|
||||
&(grid_dv->vv_ip1j__k___1), \
|
||||
&(grid_dv->vv_im1j__k___1), \
|
||||
&(grid_dv->vv_fac_a_1), \
|
||||
&(grid_dv->vv_fac_b_1), \
|
||||
&(grid_dv->vv_fac_c_1), \
|
||||
&(grid_dv->vv_fac_f_1), \
|
||||
&(grid_dv->vv_T0v_1), \
|
||||
&(grid_dv->vv_T0r_1), \
|
||||
&(grid_dv->vv_T0t_1), \
|
||||
&(grid_dv->vv_T0p_1), \
|
||||
&(grid_dv->vv_fun_1), \
|
||||
&(grid_dv->vv_change_1), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_1st, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30001);
|
||||
|
||||
} else if (iswp == 2){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___2), \
|
||||
&(grid_dv->vv_i__j__kp1_2), \
|
||||
&(grid_dv->vv_i__j__km1_2), \
|
||||
&(grid_dv->vv_ip1j__k___2), \
|
||||
&(grid_dv->vv_im1j__k___2), \
|
||||
&(grid_dv->vv_i__jp1k___2), \
|
||||
&(grid_dv->vv_i__jm1k___2), \
|
||||
&(grid_dv->vv_fac_a_2), \
|
||||
&(grid_dv->vv_fac_b_2), \
|
||||
&(grid_dv->vv_fac_c_2), \
|
||||
&(grid_dv->vv_fac_f_2), \
|
||||
&(grid_dv->vv_T0v_2), \
|
||||
&(grid_dv->vv_T0r_2), \
|
||||
&(grid_dv->vv_T0t_2), \
|
||||
&(grid_dv->vv_T0p_2), \
|
||||
&(grid_dv->vv_fun_2), \
|
||||
&(grid_dv->vv_change_2), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_1st, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30002);
|
||||
|
||||
} else if (iswp == 3){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___3), \
|
||||
&(grid_dv->vv_ip1j__k___3), \
|
||||
&(grid_dv->vv_im1j__k___3), \
|
||||
&(grid_dv->vv_i__jp1k___3), \
|
||||
&(grid_dv->vv_i__jm1k___3), \
|
||||
&(grid_dv->vv_i__j__kp1_3), \
|
||||
&(grid_dv->vv_i__j__km1_3), \
|
||||
&(grid_dv->vv_fac_a_3), \
|
||||
&(grid_dv->vv_fac_b_3), \
|
||||
&(grid_dv->vv_fac_c_3), \
|
||||
&(grid_dv->vv_fac_f_3), \
|
||||
&(grid_dv->vv_T0v_3), \
|
||||
&(grid_dv->vv_T0r_3), \
|
||||
&(grid_dv->vv_T0t_3), \
|
||||
&(grid_dv->vv_T0p_3), \
|
||||
&(grid_dv->vv_fun_3), \
|
||||
&(grid_dv->vv_change_3), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_1st, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30003);
|
||||
|
||||
} else if (iswp == 4){
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___4), \
|
||||
&(grid_dv->vv_ip1j__k___4), \
|
||||
&(grid_dv->vv_im1j__k___4), \
|
||||
&(grid_dv->vv_i__jp1k___4), \
|
||||
&(grid_dv->vv_i__jm1k___4), \
|
||||
&(grid_dv->vv_i__j__kp1_4), \
|
||||
&(grid_dv->vv_i__j__km1_4), \
|
||||
&(grid_dv->vv_fac_a_4), \
|
||||
&(grid_dv->vv_fac_b_4), \
|
||||
&(grid_dv->vv_fac_c_4), \
|
||||
&(grid_dv->vv_fac_f_4), \
|
||||
&(grid_dv->vv_T0v_4), \
|
||||
&(grid_dv->vv_T0r_4), \
|
||||
&(grid_dv->vv_T0t_4), \
|
||||
&(grid_dv->vv_T0p_4), \
|
||||
&(grid_dv->vv_fun_4), \
|
||||
&(grid_dv->vv_change_4), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_1st, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30004);
|
||||
|
||||
} else if (iswp == 5) {
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___5), \
|
||||
&(grid_dv->vv_ip1j__k___5), \
|
||||
&(grid_dv->vv_im1j__k___5), \
|
||||
&(grid_dv->vv_i__jp1k___5), \
|
||||
&(grid_dv->vv_i__jm1k___5), \
|
||||
&(grid_dv->vv_i__j__kp1_5), \
|
||||
&(grid_dv->vv_i__j__km1_5), \
|
||||
&(grid_dv->vv_fac_a_5), \
|
||||
&(grid_dv->vv_fac_b_5), \
|
||||
&(grid_dv->vv_fac_c_5), \
|
||||
&(grid_dv->vv_fac_f_5), \
|
||||
&(grid_dv->vv_T0v_5), \
|
||||
&(grid_dv->vv_T0r_5), \
|
||||
&(grid_dv->vv_T0t_5), \
|
||||
&(grid_dv->vv_T0p_5), \
|
||||
&(grid_dv->vv_fun_5), \
|
||||
&(grid_dv->vv_change_5), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_1st, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30005);
|
||||
|
||||
} else if (iswp == 6) {
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___6), \
|
||||
&(grid_dv->vv_ip1j__k___6), \
|
||||
&(grid_dv->vv_im1j__k___6), \
|
||||
&(grid_dv->vv_i__jp1k___6), \
|
||||
&(grid_dv->vv_i__jm1k___6), \
|
||||
&(grid_dv->vv_i__j__kp1_6), \
|
||||
&(grid_dv->vv_i__j__km1_6), \
|
||||
&(grid_dv->vv_fac_a_6), \
|
||||
&(grid_dv->vv_fac_b_6), \
|
||||
&(grid_dv->vv_fac_c_6), \
|
||||
&(grid_dv->vv_fac_f_6), \
|
||||
&(grid_dv->vv_T0v_6), \
|
||||
&(grid_dv->vv_T0r_6), \
|
||||
&(grid_dv->vv_T0t_6), \
|
||||
&(grid_dv->vv_T0p_6), \
|
||||
&(grid_dv->vv_fun_6), \
|
||||
&(grid_dv->vv_change_6), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_1st, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30006);
|
||||
|
||||
|
||||
} else {
|
||||
void* kernelArgs[]{\
|
||||
&(grid_dv->vv_i__j__k___7), \
|
||||
&(grid_dv->vv_ip1j__k___7), \
|
||||
&(grid_dv->vv_im1j__k___7), \
|
||||
&(grid_dv->vv_i__jp1k___7), \
|
||||
&(grid_dv->vv_i__jm1k___7), \
|
||||
&(grid_dv->vv_i__j__kp1_7), \
|
||||
&(grid_dv->vv_i__j__km1_7), \
|
||||
&(grid_dv->vv_fac_a_7 ), \
|
||||
&(grid_dv->vv_fac_b_7 ), \
|
||||
&(grid_dv->vv_fac_c_7 ), \
|
||||
&(grid_dv->vv_fac_f_7 ), \
|
||||
&(grid_dv->vv_T0v_7 ), \
|
||||
&(grid_dv->vv_T0r_7 ), \
|
||||
&(grid_dv->vv_T0t_7 ), \
|
||||
&(grid_dv->vv_T0p_7 ), \
|
||||
&(grid_dv->vv_fun_7 ), \
|
||||
&(grid_dv->vv_change_7 ), \
|
||||
&(grid_dv->tau), \
|
||||
&(grid_dv->loc_I_host), \
|
||||
&(grid_dv->loc_J_host), \
|
||||
&(grid_dv->loc_K_host), \
|
||||
&(grid_dv->dr_host), \
|
||||
&(grid_dv->dt_host), \
|
||||
&(grid_dv->dp_host), \
|
||||
&n_nodes_this_level, \
|
||||
&i_node_offset \
|
||||
};
|
||||
|
||||
print_CUDA_error_if_any(cudaLaunchKernel((void*) cuda_do_sweep_level_kernel_1st, grid_each, threads_each, kernelArgs, 0, grid_dv->level_streams[id_stream]), 30007);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// synchronize all streams
|
||||
//print_CUDA_error_if_any(cudaStreamSynchronize(grid_dv->level_streams[id_stream]), 30008);
|
||||
}
|
||||
|
||||
|
||||
// this function calculates all levels of one single sweep direction
|
||||
void cuda_run_iteration_forward(Grid_on_device* grid_dv, int const& iswp){
|
||||
|
||||
initialize_sweep_params(grid_dv);
|
||||
|
||||
int block_size = CUDA_SWEEPING_BLOCK_SIZE;
|
||||
int num_blocks_x, num_blocks_y;
|
||||
int i_node_offset=0;
|
||||
//get_block_xy(ceil(grid_dv->n_nodes_max_host/block_size+0.5), &num_blocks_x, &num_blocks_y);
|
||||
//dim3 grid_each(num_blocks_x, num_blocks_y);
|
||||
//dim3 threads_each(block_size, 1, 1);
|
||||
|
||||
for (size_t i_level = 0; i_level < grid_dv->n_levels_host; i_level++){
|
||||
get_block_xy(ceil(grid_dv->n_nodes_on_levels_host[i_level]/block_size+0.5), &num_blocks_x, &num_blocks_y);
|
||||
dim3 grid_each(num_blocks_x, num_blocks_y);
|
||||
dim3 threads_each(block_size, 1, 1);
|
||||
|
||||
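// one kernel launch per level: update n_nodes_on_levels_host[i_level] nodes starting at i_node_offset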
run_kernel(grid_dv, iswp, i_node_offset, i_level, grid_each, threads_each, grid_dv->n_nodes_on_levels_host[i_level]);
|
||||
//run_kernel(grid_dv, iswp, i_node_offset, i_level, grid_dv->grid_sweep_host, grid_dv->threads_sweep_host, grid_dv->n_nodes_on_levels_host[i_level]);
|
||||
|
||||
i_node_offset += grid_dv->n_nodes_on_levels_host[i_level];
|
||||
}
|
||||
|
||||
finalize_sweep_params(grid_dv);
|
||||
|
||||
// check memory leak
|
||||
//print_memory_usage();
|
||||
|
||||
}
|
||||
27
cuda/iterator_wrapper.cuh
Normal file
@@ -0,0 +1,27 @@
|
||||
#ifndef ITERATOR_WRAPPER_CUH
|
||||
#define ITERATOR_WRAPPER_CUH
|
||||
|
||||
#include <memory>
|
||||
#include <iostream>
|
||||
#include <algorithm>
|
||||
|
||||
#include <cuda_runtime.h>
|
||||
#include <cuda.h>
|
||||
#include <cuda_runtime_api.h>
|
||||
|
||||
#include <stdio.h>
|
||||
#include "grid_wrapper.cuh"
|
||||
#include "cuda_utils.cuh"
|
||||
|
||||
|
||||
//void cuda_do_sweep_level_kernel_3rd();
|
||||
//void cuda_do_sweep_level_kernel_1st();
|
||||
|
||||
void run_kernel(Grid_on_device*, int const&, int&, int const&, dim3&, dim3&, int&); // i_node_offset and n_nodes_this_level are non-const references, matching the definition
|
||||
|
||||
void initialize_sweep_params(Grid_on_device*);
|
||||
void finalize_sweep_params(Grid_on_device*);
|
||||
void cuda_run_iteration_forward(Grid_on_device*, int const&);
|
||||
|
||||
|
||||
#endif // ITERATOR_WRAPPER_CUH
|
||||
19
debug_mpi.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/bin/sh
|
||||
|
||||
# general mpi debugging
|
||||
|
||||
# instantly start the debugger
|
||||
#mpirun --oversubscribe -np $1 xterm -e gdb -ex run --args ../../build/bin/TOMOATT -v -i $2
|
||||
# for break point insertion
|
||||
mpirun --oversubscribe -np $1 xterm -e gdb --args ../../build/bin/TOMOATT -v -i $2
|
||||
# tui mode
|
||||
#mpirun --oversubscribe -np $1 xterm -e gdb --tui --args ../../build/bin/TOMOATT -i $2
|
||||
|
||||
|
||||
# valgrind memory leak check
|
||||
#mpirun --oversubscribe -np $1 valgrind --log-file="log_val" --leak-check=yes --track-origins=yes ../../build/bin/TOMOATT -i $2
|
||||
|
||||
# cuda debug
|
||||
#mpirun --oversubscribe -np $1 xterm -e cuda-gdb -ex run --args ../../build/bin/TOMOATT $2
|
||||
# nvprof
|
||||
#nvprof mpirun --oversubscribe -np 1 ../../build/bin/TOMOATT $2
|
||||
BIN
docs/.DS_Store
vendored
Normal file
Binary file not shown.
BIN
docs/logo/TomoATT_logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 110 KiB |
433
docs/logo/TomoATT_logo.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 137 KiB |
BIN
docs/logo/TomoATT_logo_2.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 115 KiB |
BIN
docs/logo/TomoATT_logo_only_cube.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 82 KiB |
277
docs/manual/index.md
Normal file
@@ -0,0 +1,277 @@
|
||||
# TomoATT User Manual version 1.0
|
||||
|
||||
## Introduction
|
||||
|
||||
TomoATT is a library that implements an eikonal-equation-solver-based adjoint-state travel-time tomography for very large-scale computations, following the published articles [Ping Tong (2021)](https://doi.org/10.1029/2021JB021818) and [Jing Chen (2022)](add_here_when_published).
|
||||
The slowness and anisotropy fields are computed in a spherical coordinate system.
|
||||
|
||||
Thanks to the efficiency of the eikonal equation solver, the travel-time computation is very fast and requires only a modest amount of computational resources.
|
||||
Because the input data for TomoATT are travel times at seismic stations, a large amount of input data can be prepared easily.
|
||||
|
||||
This library is developed for modeling very large domains. For this purpose, three layers of parallelization are applied:
|
||||
- layer 1: simultaneous-run parallelization (travel times for multiple seismic sources may be calculated simultaneously)
|
||||
- layer 2: subdomain decomposition (if the computational grid requires too much memory, the domain can be separated into subdomains and each subdomain run on a separate compute node)
|
||||
- layer 3: sweeping parallelization (within each subdomain, the sweeping levels are also parallelized)
|
||||
|
||||
The details of the parallelization method applied in this library are described in the paper [Miles Detrixhe and Frédéric Gibou (2016)](https://doi.org/10.1016/j.jcp.2016.06.023).
|
||||
|
||||
Regional events (sources within the global domain) and teleseismic events (sources outside the global domain) may be used for inversion.
|
||||
|
||||
---
|
||||
|
||||
## Input files
|
||||
|
||||
TomoATT requires 3 input files :
|
||||
1. input parameter file (setup file for simulation parameters)
|
||||
2. source receiver file (source and receiver definitions and observation arrival times)
|
||||
3. initial model file (3d initial model)
|
||||
|
||||
|
||||
### 1. input parameter file
|
||||
|
||||
All the parameters necessary for setting up a calculation are described in the input parameter file in [YAML format](https://en.wikipedia.org/wiki/YAML).
|
||||
|
||||
Below is an example of input parameter file for making a forward simulation.
|
||||
|
||||
``` yaml
version : 2

domain :
  min_max_dep : [5740.6370,5790.6370] # depth in km
  min_max_lat : [45.0,55.0] # latitude in degree
  min_max_lon : [35.0,45.0] # longitude in degree
  n_rtp : [40,40,40] # number of nodes

source :
  src_rec_file : 'src_rec_test.dat' # source receiver file

model :
  init_model_path : './test_model_init.h5' # path to initial model file

inversion :
  run_mode : 0 # 0 for forward simulation only, 1 for inversion

parallel :
  n_sims : 1 # number of simultaneous runs
  ndiv_rtp : [2,2,1] # number of subdomains
  nproc_sub : 1 # number of subprocesses used for each subdomain

calculation :
  convergence_tolerance : 1e-6
  max_iterations : 100
  stencil_order : 3 # 1 or 3
  sweep_type : 1 # 0: legacy, 1: cuthill-mckee with shm parallelization

output_setting :
  # output the calculated field of all sources. 1 for yes; 0 for no; default: 1
  is_output_source_field : 0
  # output internal parameters, if no, only model parameters are out. 1 for yes; 0 for no; default: 0
  is_verbose_output : 0
  # output model_parameters_inv_0000.dat or not. 1 for yes; 0 for no; default: 1
  is_output_model_dat : 0
```
|
||||
|
||||
There are categories and sub-categories with setup parameters.
|
||||
Below is the explanation of each category and sub-category.
|
||||
If a category tag or sub-category tag is missing from the input parameter file, the default value will be used.
|
||||
|
||||
#### domain :
|
||||
The domain category is for setting a global domain of the simulation.
|
||||
- `min_max_dep` : `[dep1, dep2]` minimum and maximum depth of the domain in kilometers
|
||||
- `min_max_lat` : `[lat1, lat2]` minimum and maximum latitude in degree
|
||||
- `min_max_lon` : `[lon1, lon2]` minimum and maximum longitude in degree
|
||||
- `n_rtp` : `[nr, nt, np]` number of computational nodes in the r (depth), t (latitude) and p (longitude) directions
|
||||
|
||||
#### source :
|
||||
The source category sets the source and receiver definitions.
|
||||
- `src_rec_file` : path to a source receiver file (details of this file are given in the following section). The calculated travel times at the receivers will be output as a new source receiver file in the OUTPUT_FILES directory.
|
||||
- `swap_src_rec` : `0` or `1`. Set 1 to use receivers as sources, which reduces computation time when the number of sources is larger than the number of receivers (usual when using a large dataset).
|
||||
|
||||
#### model :
|
||||
- `init_model_path` : File path for 3D initial model file. Details will be explained in the following section.
|
||||
- `model_1d_name` : the name of the 1D model used for teleseismic tomography. `ak135` and `iasp91` are available. A user-defined model can be used by modifying the file `1d_models.h` in the `include` directory (changing the `model_1d_prem` part) and setting this `model_1d_name` parameter to `user_defined`.
|
||||
#### inversion :
|
||||
- `run_mode` :
|
||||
- `0` for running only a forward simulation.
|
||||
- `1` for running an inversion.
|
||||
- `2` for precalculating only the 2D traveltime fields for teleseismic sources. This is an optional step for the teleseismic case: if the 2D traveltime field is not precalculated, it will be calculated during mode `0` or `1`. This mode is useful for calculations on HPC systems, as the 2D eikonal solver has not been parallelized; users can run it on a local machine beforehand to reduce CPU time on the HPC.
|
||||
- `3` for running earthquake relocation. For this mode, `sources : swap_src_rec` needs to be set to 1; otherwise the program will exit with an error message.
|
||||
- `n_inversion_grid` : the number of inversion grids.
|
||||
- `n_inv_dep_lat_lon` : the numbers of inversion grids for r, t and p direction.
|
||||
- `min_max_dep_inv` : `[dep1, dep2]` minimum and maximum depth of the inversion grid in kilometers.
|
||||
- `min_max_lat_inv` : `[lat1, lat2]` minimum and maximum latitude of inversion grid in degree.
|
||||
- `min_max_lon_inv` : `[lon1, lon2]` minimum and maximum longitude of inversion grid in degree.
|
||||
|
||||
Currently, TomoATT provides two ways to define the inversion grid: a regular grid (even intervals along each axis), or user-defined intervals for each axis.
|
||||
|
||||
For setting the user defined intervals, please use the flags below.
|
||||
- `type_dep_inv` : `0` for regular grid (default), `1` for user defined intervals.
|
||||
- `type_lat_inv` : `0` for regular grid (default), `1` for user defined intervals.
|
||||
- `type_lon_inv` : `0` for regular grid (default), `1` for user defined intervals.
|
||||
|
||||
and the coordinates where the main inversion grids are placed,
|
||||
- `dep_inv` : `[z1,z2,z3,...]` for user defined intervals.
|
||||
- `lat_inv` : `[y1,y2,y3,...]` for user defined intervals.
|
||||
- `lon_inv` : `[x1,x2,x3,...]` for user defined intervals.
|
||||
|
||||
Other parameters for the inversion setting:
|
||||
- `max_iterations_inv` : The limit of iteration number for inversion.
|
||||
- `step_size` : Maximum step size ratio for updating model.
|
||||
- `smooth_method` : `0` or `1`. 0 for multigrid parametrization ([Ping Tong 2019](https://doi.org/10.1093/gji/ggz151)). 1 for laplacian smoothing with CG method.
|
||||
- `l_smooth_rtp` : `[lr, lt, lp]` smoothing coefficients for laplacian smoothing on r,t and p direction.
|
||||
- `optim_method` : `0`, `1` or `2`. 0 for gradient descent, 1 for gradient descent with halve-stepping, 2 for L-BFGS (THIS MODE IS EXPERIMENTAL AND CURRENTLY NOT WORKING.).
|
||||
- `regularization_weight`: regularization weight, used ONLY in L-BFGS mode.
|
||||
- `max_sub_iterations` : maximum number of sub iteration. Used for optim_method 1 and 2.
|
||||
|
||||
#### parallel :
|
||||
- `n_sims` : number of simultaneous runs
|
||||
- `ndiv_rtp` : `[ndiv_r, ndiv_t, ndiv_p]` number of domain decomposition on r, t and p direction
|
||||
- `nproc_sub` : number of processes used for sweeping parallelization
|
||||
- `use_gpu` : `0` or `1`. Set 1 for using GPU. (Currently gpu mode is used only for a forward simulation. n_sims, ndiv_rtp and nproc_sub need to be 1.)
|
||||
|
||||
The total number of MPI processes (i.e. `mpirun -n NUMBER`) must be n_sims\*ndiv_r\*ndiv_t\*ndiv_p\*nproc_sub; otherwise the code will stop instantly. A minimal sketch for checking this is shown below.
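The required process count can be derived directly from the `parallel` section of the parameter file. A minimal sketch, assuming the YAML layout of the example above and that PyYAML is installed (`input_params.yml` is a hypothetical file name):

```python
import yaml  # PyYAML

with open("input_params.yml") as f:
    par = yaml.safe_load(f)["parallel"]

ndiv_r, ndiv_t, ndiv_p = par["ndiv_rtp"]
n_procs = par["n_sims"] * ndiv_r * ndiv_t * ndiv_p * par["nproc_sub"]
print(f"run with: mpirun -n {n_procs} TOMOATT -i input_params.yml")
```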
|
||||
|
||||
#### calculation :
|
||||
- `convergence_tolerance` : convergence criterion for forward and adjoint run.
|
||||
- `max_iterations` : maximum number of iterations for the forward and adjoint runs
|
||||
- `stencil_order` : `1` or `3`. The order of stencil for sweeping.
|
||||
- `sweep_type` : `0`or `1`. 0 is for sweeping in legacy order (threefold loops on r,t and p), 1 for cuthill-mckee node ordering with sweeping parallelization.
|
||||
- `output_file_format` : `0` or `1` for selecting input and output file format. `0` is for HDF5 format, `1` is for ASCII format.
|
||||
|
||||
#### output_setting :
|
||||
- `is_output_source_field` : `0` (no) or `1` (yes). Whether to output the calculated field of all sources.
|
||||
- `is_verbose_output` : `0` or `1`. Whether to output internal parameters; if not, only model parameters are output.
|
||||
- `is_output_model_dat` : `0` or `1`. Whether to output model_parameters_inv_0000.dat.
|
||||
|
||||
|
||||
### 2. source receiver file
|
||||
The source receiver file defines the source and receiver positions and the observed arrival times.
|
||||
|
||||
Below is an example:
|
||||
```
|
||||
1 1992 1 1 2 43 56.900 1.8000 98.9000 137.00 2.80 8 305644 <- source 1
|
||||
1 1 PCBI 1.8900 98.9253 1000.0000 P 18.000 <- receiver 1
|
||||
1 2 MRPI 1.6125 99.3172 1100.0000 P 19.400 <- receiver 2
|
||||
1 3 HUTI 2.3153 98.9711 1600.0000 P 19.200
|
||||
....
|
||||
|
||||
```
|
||||
|
||||
```
|
||||
Source line : id_src year month day hour min sec lat lon dep_km magnitude num_recs id_event (weight)
|
||||
Receiver line : id_src id_rec name_rec lat lon elevation_m phase arrival_time_sec (weight)
|
||||
```
|
||||
|
||||
`num_recs` (the number of receivers for this source) needs to match the number of receiver lines.
|
||||
The last column of both the source and receiver lines holds an optional weight (on the objective function); it is set to 1.0 if not written.
|
||||
`name_rec` needs to be different for each station.
|
||||
`lon` and `lat` are in degree.
|
||||
|
||||
If the source position is outside the global domain (defined in the input parameter file), the code will flag this event as a teleseismic event and run the dedicated teleseismic routine. For a teleseismic event, swap_src_rec is ignored (in the teleseismic case the source is not a point but boundary surfaces).
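For reference, a minimal Python sketch for reading this file format, assuming the column layout described above (the trailing optional weight columns and the `<- ...` annotations of the example are ignored):

```python
def read_src_rec(path):
    """Parse a source receiver file into (source_fields, receiver_list) pairs."""
    with open(path) as f:
        rows = [ln.split() for ln in f if ln.strip()]
    events, i = [], 0
    while i < len(rows):
        src = rows[i]                 # id_src year month day hour min sec lat lon dep_km mag num_recs id_event
        num_recs = int(src[11])
        recs = []
        for rec in rows[i + 1 : i + 1 + num_recs]:
            # id_src id_rec name_rec lat lon elevation_m phase arrival_time_sec
            recs.append({"name": rec[2], "lat": float(rec[3]), "lon": float(rec[4]),
                         "elev_m": float(rec[5]), "phase": rec[6], "t_obs": float(rec[7])})
        events.append((src, recs))
        i += 1 + num_recs
    return events
```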
|
||||
|
||||
|
||||
### 3. initial model file
|
||||
|
||||
The initial model file defines the parameters of the initial model.
|
||||
Necessary parameters are `vel` (velocity), `eta`, `xi`, `zeta`.
|
||||
|
||||
#### initial model file in HDF5 format
|
||||
|
||||
In HDF5 I/O mode (`output_file_format`: 0 in the input parameter file), all the necessary parameters should be saved in a single `.h5` file, with dataset names exactly matching the parameter names written above.
|
||||
The dimensions of each dataset should be the same as `n_rtp` in the input parameter file.
|
||||
Please refer to `examples/inversion_small/make_test_model.py` for details; a minimal sketch is given below.
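A minimal sketch of writing such a file with h5py. The dataset names follow the parameters listed above; the array shape ordering relative to `n_rtp` and the uniform placeholder values are assumptions, so refer to the example script for the authoritative layout:

```python
import numpy as np
import h5py

n_rtp = (40, 40, 40)  # must be consistent with n_rtp in the input parameter file

with h5py.File("test_model_init.h5", "w") as f:
    f.create_dataset("vel",  data=np.full(n_rtp, 6.0))  # velocity, placeholder value
    f.create_dataset("eta",  data=np.zeros(n_rtp))      # anisotropy parameters
    f.create_dataset("xi",   data=np.zeros(n_rtp))
    f.create_dataset("zeta", data=np.zeros(n_rtp))
```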
|
||||
|
||||
|
||||
#### initial model file in ASCII format
|
||||
|
||||
In ASCII I/O mode (`output_file_format`: 1 in the input parameter file), all the necessary parameters should be saved in a single ASCII file.
|
||||
The number of rows in the file needs to equal the number of global nodes (i.e. n_rtp[0]\*n_rtp[1]\*n_rtp[2]).
|
||||
|
||||
The node order should be:
|
||||
```python
# write nodes in rtp
for ir in range(n_rtp[0]):          # number of nodes in the r direction
    for it in range(n_rtp[1]):      # number of nodes in the t direction
        for ip in range(n_rtp[2]):  # number of nodes in the p direction
            # write out the parameters of one node per row:
            # eta xi zeta fun fac_a fac_b fac_c fac_f
```
|
||||
|
||||
A complete example can be found in `examples/inversion_small_ASCII/make_test_model.py`.
|
||||
|
||||
---
|
||||
|
||||
## Output files
|
||||
|
||||
Calculated travel times at the stations will be written to `(source receiver file)_out.dat`, in the column for travel times.
|
||||
|
||||
Volumetric result data files are saved in OUTPUT_FILES directory.
|
||||
|
||||
As the node order in the output file follows each subdomain rather than the global domain, a small post-processing step is necessary to extract slices.
|
||||
`utils/tomoatt_data_retrieval.py` includes functions for this post processing.
|
||||
Please refer to the concrete examples in `inversion_small/data_post_process.py` for HDF5 mode, and `inversion_small_ASCII/data_post_process.py` for ASCII mode.
|
||||
|
||||
Only the final iteration result is saved in 3D matrix form, as `final_model.h5` or `final_model.dat`, so no post-processing is necessary for the final result.
|
||||
|
||||
|
||||
### HDF5 I/O mode
|
||||
In HDF5 mode, the code carries out collective writing from all MPI processes into one single output file, which maximizes the I/O bandwidth for efficient I/O.
|
||||
|
||||
TomoATT produces output files like below:
|
||||
- out_data_grid.h5 : grid coordinate and connectivity data
|
||||
- out_data_sim.h5 : field data
|
||||
- out_data_sim.xmf : XDMF index data for visualizing 3D data. This may be open by Paraview.
|
||||
- final_model.h5 : final model parameters
|
||||
|
||||
Travel time field for i-th source may be visualized by reading `OUTPUT_FILES/out_data_sim_i.xmf`.
|
||||
All the inverted parameters from all the sources and receivers are saved in `out_data_sim_0.xmf`.
|
||||
|
||||
|
||||
The internal composition of the .h5 files can be inspected with the `h5ls -r` command.
|
||||
The composition of out_data_grid.h5 is :
|
||||
```
|
||||
/ Group
|
||||
/Mesh Group
|
||||
/Mesh/elem_conn Dataset {21609, 9} # node connectivity used for visualization
|
||||
/Mesh/node_coords_p Dataset {26010} # longitude [degree]
|
||||
/Mesh/node_coords_r Dataset {26010} # radius [km]
|
||||
/Mesh/node_coords_t Dataset {26010} # latitude [degree]
|
||||
/Mesh/node_coords_x Dataset {26010} # xyz coordinate in WGS84
|
||||
/Mesh/node_coords_y Dataset {26010}
|
||||
/Mesh/node_coords_z Dataset {26010}
|
||||
/Mesh/procid Dataset {26010} # mpi processor id
|
||||
```
|
||||
|
||||
|
||||
out_data_sim.h5 is :
|
||||
```
|
||||
/model Group
|
||||
/model/eta_inv_000j Dataset {25000} # eta at j-th inversion
|
||||
/model/xi_inv_000j Dataset {25000} # xi
|
||||
/model/vel_inv_000j Dataset {25000} # velocity field at j-th inversion
|
||||
```
|
||||
|
||||
then final_model.h5 is :
|
||||
```
|
||||
eta Dataset {10, 50, 50}
|
||||
vel Dataset {10, 50, 50}
|
||||
xi Dataset {10, 50, 50}
|
||||
```
|
||||
The internal composition of final_model.h5 is exactly the same as that of the initial model file. Thus the final model file can be used as the initial model for the next run by setting it as `init_model_path` in the input parameter file. A minimal reading sketch follows.
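A minimal sketch of inspecting the final model with h5py, assuming the dataset names and shapes shown in the listing above:

```python
import h5py

with h5py.File("OUTPUT_FILES/final_model.h5", "r") as f:
    vel = f["vel"][:]   # shape follows the listing above, e.g. (10, 50, 50)
    eta = f["eta"][:]
    xi  = f["xi"][:]

print(vel.shape, float(vel.min()), float(vel.max()))
```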
|
||||
|
||||
### ASCII I/O mode
|
||||
In ASCII mode, the code does independent writing (i.e. each MPI process performs I/O sequentially) into a single output file.
|
||||
|
||||
The files that TomoATT creates in the OUTPUT_FILES directory are:
|
||||
|
||||
```
|
||||
out_grid_conn.dat # node connectivity
|
||||
out_grid_ptr.dat # grid coordinate lon (degree), lat (degree), radius (km)
|
||||
out_grid_xyz.dat # grid coordinate in WGS84
|
||||
eta_inv_000j.dat # eta
|
||||
xi_inv_000j.dat # xi
|
||||
vel_inv_000j.dat # velocity
|
||||
```
|
||||
|
||||
164
docs/manual/usage_on_HPCs.md
Normal file
@@ -0,0 +1,164 @@
|
||||
# commands for compiling and running on HPCs
|
||||
|
||||
## Gekko @ NTU
|
||||
|
||||
### 1. Load necessary modules and select GNU compilers
|
||||
```bash
|
||||
module purge && module load cmake gnu/gcc-9.3.0
|
||||
```
|
||||
|
||||
### 2. Compile OpenMPI and HDF5 with the parallel option
|
||||
`./install_mpi_and_hdf5_local.sh`
|
||||
will create openmpi and hdf5 executables in external_libs/local_mpi_hdf5/bin
|
||||
|
||||
### 3. Compile TomoATT
|
||||
```bash
|
||||
# make a build directory
|
||||
mkdir build
|
||||
|
||||
# compile TomoATT
|
||||
cd build
|
||||
CC=/usr/local/gcc-9.3.0/bin/gcc CXX=/usr/local/gcc-9.3.0/bin/g++ cmake .. -DCMAKE_PREFIX_PATH=$(pwd)/../external_libs/local_mpi_hdf5
|
||||
|
||||
make -j16
|
||||
```
|
||||
|
||||
Then the executable TOMOATT is created in the build directory.
|
||||
|
||||
|
||||
## Fugaku @ RIKEN
|
||||
|
||||
|
||||
### 0. Start an interactive job on Fugaku (to access the aarch64 environment)
|
||||
|
||||
```bash
|
||||
pjsub --interact -g hp220155 -L "node=1" -L "rscgrp=int" -L "elapse=1:00:00" --mpi "max-proc-per-node=12" --sparam "wait-time=600" -x PJM_LLIO_GFSCACHE=/vol0004 --no-check-directory
|
||||
```
|
||||
|
||||
|
||||
### 1. Load necessary modules
|
||||
```bash
|
||||
# prepare spack env
|
||||
. /vol0004/apps/oss/spack/share/spack/setup-env.sh
|
||||
# load Fujitsu mpi
|
||||
spack load /jfzaut5
|
||||
# or load gnu 11.2.0
|
||||
spack load /nphnrhl /cvur4ou
|
||||
```
|
||||
|
||||
|
||||
|
||||
### 2. Download hdf5 source code and compile it
|
||||
```bash
|
||||
|
||||
# every file will be placed in external_libs
|
||||
cd ./external_libs
|
||||
|
||||
# make a local install pass
|
||||
mkdir local_mpi_hdf5
|
||||
|
||||
# download hdf5 source
|
||||
wget https://gamma.hdfgroup.org/ftp/pub/outgoing/hdf5/snapshots/v112/hdf5-1.12.2-1.tar.gz
|
||||
|
||||
#Extract the downloaded directory
|
||||
tar -xvf hdf5-1.12.2-1.tar.gz && cd hdf5-1.12.2-1
|
||||
|
||||
# Configure the code (the paths to mpicc/mpicxx may vary depending on the environment)
|
||||
CC=mpifcc CFLAGS="-Nclang" CXX=mpiFCC CXXFLAGS="-Nclang" ./configure --enable-parallel --enable-unsupported --enable-shared --enable-cxx --prefix=$(pwd)/../local_mpi_hdf5
|
||||
|
||||
# make
|
||||
make -j12 && make install
|
||||
|
||||
# now hdf5 executables are in external_libs/local_mpi_hdf5/bin
|
||||
```
|
||||
|
||||
or with gnu 11.2.0
|
||||
```bash
|
||||
|
||||
# download hdf5 source
|
||||
wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.13/hdf5-1.13.2/src/hdf5-1.13.2.tar.gz
|
||||
#Extract the downloaded directory
|
||||
tar -xvf hdf5-1.13.2.tar.gz
|
||||
cd hdf5-1.13.2
|
||||
# Configure the code (the paths to mpicc/mpicxx may vary depending on the environment)
|
||||
CC=mpicc CXX=mpic++ ./configure --enable-parallel --enable-unsupported --enable-shared --enable-cxx --prefix=$(pwd)/../local_mpi_hdf5
|
||||
# make
|
||||
make -j12 && make install
|
||||
|
||||
```
|
||||
|
||||
|
||||
### 3. Compile TomoATT
|
||||
```bash
|
||||
# cd to TomoATT directory
|
||||
cd ../..
|
||||
|
||||
# make a build directory
|
||||
mkdir build
|
||||
|
||||
# compile TomoATT
|
||||
cd build
|
||||
|
||||
CC=mpifcc CXX=mpiFCC cmake .. -DCMAKE_PREFIX_PATH=$(pwd)/../external_libs/local_mpi_hdf5
|
||||
# or for gnu 11.2.0
|
||||
cmake .. -DCMAKE_PREFIX_PATH=$(pwd)/../external_libs/local_mpi_hdf5
|
||||
|
||||
make -j12
|
||||
```
|
||||
|
||||
### 4. Terminate the interactive job
|
||||
`Ctrl + D`
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## ASPIRE 1 @ NSCC
|
||||
|
||||
### 0. load necessary modules
|
||||
```bash
|
||||
module purge
|
||||
export PATH=/app/gcc/9.5.0/bin:$PATH && export LD_LIBRARY_PATH=/app/gcc/9.5.0/lib:$LD_LIBRARY_PATH
|
||||
module load intel/19.0.0.117
|
||||
```
|
||||
|
||||
### 1. Download hdf5 source code and compile it
|
||||
```bash
|
||||
|
||||
# download openmpi and hdf5 source code on your LOCAL MACHINE
|
||||
wget https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.13/hdf5-1.13.3/src/hdf5-1.13.3.tar.gz
|
||||
|
||||
# then upload it to NSCC (for example)
|
||||
scp -rC hdf5-1.13.3.tar.gz aspire1:~/(where TomoATT placed)/external_libs/
|
||||
|
||||
# on ASPIRE 1
|
||||
cd external_libs
|
||||
|
||||
mkdir local_mpi_hdf5
|
||||
|
||||
# extract tar file and cd to the directory
|
||||
tar -xvf hdf5-1.13.3.tar.gz && cd hdf5-1.13.3
|
||||
|
||||
# configure the code
|
||||
CC=mpiicc CXX=mpiicpc \
|
||||
./configure --enable-parallel --enable-unsupported --enable-shared --enable-cxx --prefix=$(pwd)/../local_mpi_hdf5
|
||||
|
||||
# make and install to the prefix
|
||||
make -j16 && make install
|
||||
|
||||
```
|
||||
|
||||
### 2. Compile TomoATT
|
||||
```bash
|
||||
# cd to TomoATT directory
|
||||
cd ../..
|
||||
|
||||
# make a build directory
|
||||
mkdir build
|
||||
|
||||
# compile TomoATT
|
||||
cd build
|
||||
CC=icc CXX=icpc cmake .. -DCMAKE_PREFIX_PATH=$(pwd)/../external_libs/local_mpi_hdf5
|
||||
|
||||
make -j16
|
||||
```
|
||||
1
docs/mpi_diagram/class_diagram
Normal file
@@ -0,0 +1 @@
|
||||
<mxfile host="app.diagrams.net" modified="2022-05-10T02:47:59.004Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36" etag="L6iIxsoobwzSYsZj_5N6" version="18.0.2" type="device"><diagram id="C5RBs43oDa-KdzZeNtuy" name="Page-1">7VxZc+I4EP41VM0+hMK3eUzIsZnN7BzMTnaeKIEFaGNbjC0CzK9fSZZPyVyJgRyZqgxuS23U/anVl9MyesHyJgKz6SfsQb+ld7xly7hs6bquORb9j1FWCcVxjIQwiZCXkLSc0Ee/oSB2BHWOPBiXBhKMfYJmZeIIhyEckRINRBFelIeNsV9+6gxMoEToj4AvU++RR6YJ1dWdnP4nRJNp+mTN7iZ3ApAOFiuJp8DDiwLJuGoZvQhjknwKlj3oM+Glcrm/Xd37dw/2zcev8S/wz8Vf3//+cZYwu95lSraECIZkb9Y/v/+6duO/P17ejo3rr1e/z0c3Wsr6EfhzIa8bptZkvWSVCjFeoMAHIb26EBNgROCyIuENX0/LZEbBBnEASbSi8wQXQ0h5lcJHXC9ypekpbVpQWEYEAiiTjHUuDPpByGMH2diSbG4JjADB0VHkc3oC0hxJENCjm09c4ohM8QSHwL/KqRcRnoceZGw79Cofc4fxjBI1SvwPErISlgTMCaakKQl8cRcuEfmXTW9b4upn4c7lUnDmF6vCxRcYIbpuGKW0kMqgwIhd/izey1nxq1XxqsosEQRb/R46pxLE82gE14haSJqAaALJmnGuGkMR9AFBj+Uv9/x4cN/xcGJ46B4TD45kQM/5pD5fXUx5foMjSL9GFMsWdQpm7ONo5SOKj8igQl1MEYH9GeCSWVB3pQyEYYKku2FGAKOHCcfX5zmhbKCgxwmUNKtBc62ZZXNtdBXm2lKYa6cpa+1K2ugzDVASXUmP3upRx67TbrdfvS4sTdaFpil0YTeli26tLkyqB+px9+zXqAjdPTVFpA8raOISPdKIhU0M6VFD5Y+CuU+oc4fnMTOd85DHOnhOAxjd9gmTKfUI7Qn79CEjpRQqMuD7LKi6DgeUVzaJErJB/Gn812xOBmxKELdXgf+HBAGqEVLWcEwi/AB72Mfs8Akx90LHyPcrJOCjScjAQzXIzqkLpl9EA6VzcSNAnsePZBWkysd0UwAxO1YZILrCaroKgBiNAUSXAEJ1PwRsYjBDPHANArbyCM+YQKECFTIlTGbQUzBmE6hMz9kqceR7g5DdUHAJ+MkNwofK+IRUHc2+1jyk+uXxipjw6cvtgN0Y3H/+dne5Bbi2B00EqQUBQ86KQWSG6f7h2rAuWtYl40Vdt9TKSLht6YbNfyrgpfSxxf4xOg5JgZ78MLqPZj8E16aQSYFQhqbyQFfZLrcxaFoSNOttlQBqggiEw/1BSp+xA0TZ6B0AyoYnG+odmttDs5IaMLuWDE39oNCUcyeSRmHonbP8Xn5CeSCe8iNGK2tadfbsGVrJQizISOWOp7StIybxhC8MYwXPxy5bD8eqiD6J+MSsYhZPYmS2K2dkNb+TBIUSK67IbOH761Y3NutWmJwavItUr9iRrQyaT3XzNa0kGFthobNQoKjjblPbQJe9S0lUmU8/H9b5Xvu48x6IHj5TNojwPEW7Y5WJOqcWHH9mgelOI8npsIObtwYktaqy9LZsomytnZ6xJd/faEo7smv3rh1unNy2U7YxabXkiNrS5Jj5jWvrmFtHrhu9KGVU3DioeRZ0VI5f13YMYDe52XS7rZ/cZjPkZGGtfsc+XApH7qLg0418EMdoxIUKIiKTax28dN1akl1Pa8pmm3vUnFuZVNGmB6A7Hind+JELh+PtfMh9t6KmqsFlxCe6kbZRtczVdPG2fqSmbWTVsB9p7GDQ3zF2OIx1S6AwjdRT2xVhRpmRIzFqOk4xN+PrKHGK7pbjFEdRyT9wnCJnkl7Ucf6Ew9lcq6rTiFO2SKa8Se2cZJyiy1XvN66tYypjBz/2bSjjlKMOXfYI+/OhhwPAi6MeHOFghmOkrl+sL7166HEQEVYGUdVf64qvm2skn2CAuWBjDhnKhVVKYsjYEOpuisIuBKMpo+fLqaDwldd1Kx0YjqquqymwVk2TP1+Ea0sqOGSvWivvVMua05is3FKrmmNlPW313WqtU+9VS6tTG5vVEuN8rG414yUk65vaoFnXTVpU2rZ2wiHakEKsF7BF7dexRdM3MzZuUfuoW/QlpJ2b2qJS89wpbFE52Klxd/buNpkPd+k2oaN36Tahw5Nuk+rwwjdnEkgnIG+wiwt3yBaVrFJRW9tIX1/iBZCD9KVUuvlcQ4FY55CJLlM+4xFTzVlR38/UJ8UZDzL8bgPffMoOIM4nsdsKLMuUXnGF7BtTRbMAiwUUeme4KkRJ1S0xEEJiwmPy65CISvK0tkKWgq9N2h9+K+hl422rWrSUWX29sb0g9/FcCbO9rotwCmIlPBBhdLxgY4NqUDzmeGUviMpN7UfESn1Q3RQQTKuCA1UHfFeBg8YanM0tyiSNleGqG9SCrmeqtrSrDw07r8M30f3XaFGt+nqrXdXn1lU1bQOjmqoaVQpYFYaJnVL/hR31c3KoJRyftWRnynUocTyvsUiK4w4EzM0Ph/GsdGg+6Tgvv7yx7hQ/Ozujvz8wXxEFiiTiulP8NNufs91Xu18Pf6BqlS5YVTSkMqTN+ZYvvEy3e0/UJAIegqXG9zH/Kdy7RBEciaeELB3TZBJLt9rpX29YF3JonXZawzhIN7wpR8n9BYSzZMW8REE3KMGqOHl9VYOZKeb6175T9nwVjSnVDQuEQYDDCfP7pzCJoIumkocG4g4IYDkF0PnQT5kkDuKbe7utUgVxTdV7GoesgqSMTyzFatINqqdp1STNarQ2vrVvOkarmGhta3r2Iv8pJVv1bZOtZkPJVnqZ/9WaxHfL//aPcfU/</diagram></mxfile>
|
||||
1
docs/mpi_diagram/earthquake_relocation
Normal file
@@ -0,0 +1 @@
|
||||
<mxfile host="app.diagrams.net" modified="2022-09-12T02:52:25.950Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36" etag="SK8VwU3GGxZN_m9dQugb" version="16.4.8" type="device"><diagram id="C5RBs43oDa-KdzZeNtuy" name="Page-1">3Vpbd5s4EP41Pmf3oTkgMLYfYyfb7m53223aTfLUI4NsSASiQsT2/vodgTAX0cRJTLD9wkGDJEYz880NBtYsXL/nOPb/Yh6hA2R464F1MUDIdhBcJWGTE0aGmROWPPByUoVwFfxHFNFQ1DTwSFKbKBijIojrRJdFEXFFjYY5Z6v6tAWj9bfGeEk0wpWLqU69Djzh59QxGpX0DyRY+sWbTWeSPwlxMVmdJPGxx1YVknU5sGacMZHfhesZoVJ2hVyuf99c04/3zvs//kl+4G/TP7/+/e+7fLPfnrNkewROIvHireM73/wRfWE4uJlN/U8fHsYT9s501NnEphAY8UB+asi48NmSRZheltQpZ2nkEbmtAaNyzkfGYiCaQLwjQmyUMeBUMCD5IqTqKVkH4qZyfyu3Ohuq0cVa7ZwNNmqQ8ymZa+j1CaGoeQlLuUsekYRSssB8ScRjErO2qgfIEBYSwTewkBOKRfBQ5w4r411u55UKghulo2foy7L61Fepo9vKk3Z9gVb4prJIDm+rz8pl2ejQ9GwZfepZcfmAaareNMPUTeHVBMhfOX7IvLQIQjleBIR6iXwQDpBD4VjTOYe7pbxbMC45zN7JiUuAdy7nhpollXYiVbvyA0GuYpxJcwWxoW4TeJ4wmgpyzl1lNxm1HNkwTARn91u/i7bqBRYEWb9Awbo+il0KL63C1Db+rEqnjwxF8ysOf2h0BVVTE/ChQhUGnwkHWxKEHzh8ixTiKfiiPtFraui9AoZFZo1gjMjBoURTNE/iTGRNyPI0+h5CIgYLBtY5XJFmSjqyKkYAmUos54XrpczpzhaUrVwfODhLJB/ffwbvzsA5qoNzrGPTbMOm0xk2dYEeKjYPFIdoRxz2GkWRjsMVGHpxPBkFceTV4+IJx0TT2TEmWl3hDh0N7rrGj7VruTHptdzQAHS5Fhy7oiUJxZJIsOuXx5cJ6S83ya+nharJsIYqa9QSzUZvGc1sXUvSq+0nY4CUMAwiLKCK6DdpcHrPGtDkWLzXoxm9WcsjyrSir4x+Z1c4eqUrVEs/syASpZ3ZjShpmQ37yRlTqxomtGXjFcWC7mQ/pSJORVbYU6LnJC9EsocFnuOEvDGOLadRmRsjDcfjtsJ82BWMj6cwP9Dk3xztCNlee6UFlxVgfSHYU22yE8pIkNXISFp6X5MWhNmdlde9flUwKvh66qvC462vAwuU411R12vRbY2Oxr3use+ZjZqbHYxF2L364bHmh2c+ce8H2UdfcIdLEslicU9ZDnGDJGDRW1crqO6DHaOlWmlzwt1VK/1+KnyOEz5UhzvZEV6vrkxeh6+Jhi+38rEQAOIFJKt42EJe5nfEzbiC+iKN4BbAUjRCkzSUB2Z6K+eE8qUtDhVW7WELVltLks6wqrdwQA9TvVABSYiGJ8zENmNUNmguIhZJGEPdSBskTIMlOMULFyQnA9RUyjUAQzlXD8LA8+jPisO6X+hMMUNUT2QNR1OM85b96iKxPoJc5pjyFrRr/ZinDr19PtILyG+xl3vVbYebMhfnLrQHD9mC886w2WjHDu0d27HdVZn20YHzwFqvhd98+m+KvbRezznHm8qEWLZUk8rOjc4sGtVz6qFjNIwm33Gv7dhCJhXQnzy0Gx1aq6V22cbYKrSRaT8b2zAsf5nNlVb+d2xd/g8=</diagram></mxfile>
|
||||
BIN
docs/mpi_diagram/earthquake_relocation.drawio.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 40 KiB |
1
docs/mpi_diagram/mpi_group_explanation
Normal file
@@ -0,0 +1 @@
|
||||
<mxfile host="app.diagrams.net" modified="2022-05-10T02:54:26.273Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.41 Safari/537.36" etag="8aiYvozFSNltXpVq9eHv" version="18.0.2" type="device"><diagram id="C5RBs43oDa-KdzZeNtuy" name="Page-1">7Vzpd9o4EP9reK/7AR4+gY85u+2227TpNttPPIEFeGNb1BZJ6F+/o8OnZK7EQI70tcVjSdgzv7k0o7Sss/DhfYzms8/Ew0HL7HoPLeu8ZZqGZffhP0ZZCkqvZwnCNPY9OSgnXPu/sSR2JXXhezgpDaSEBNSfl4ljEkV4TEs0FMfkvjxsQoLyt87RFCuE6zEKVOqN79GZoPbNXk7/E/vTWfrNhjsQd0KUDpZvksyQR+4LJOuiZZ3FhFDxKXw4wwFjXsqXmw/Lm+DTrfv+49fkF/rn9K/vf/9oi8Uut5mSvUKMI7rz0j+//7rsJ39/PP8wsS6/Xvw+Gb832oZkQ0KXKcOwB/yTlySmMzIlEQoucuppTBaRh9myXbjKx3wiZA5EA4j/YUqXEgxoQQmQZjQM5F384NN/2fSOI69+Fu6cP8iV+cWycHGFYz/EFMcpLaLxsrAQu/xZvJcvxa+WxavqYoIR7O0rwFnDdTkuIYt4jFewWnKaoniK6Ypx/QxaoJOYwCPGS5gX4wBR/678cEgqxzQblwMAPkgMbIOH/hsejgwPg0PiQT7kHQoW8ptO+KRr/nYJrPkNjzE8RpwowAF7OWcfx8vAB3zEFjD1fuZTfD1HnDP34HHKQBgJJH0aZQQ0vp1yfH1ZUFgGS3oioGQ4maDgESh+2EFUKmvlKmZPGn7p+UxHXt/nfiSjzQo+JJ335NLoK9K4ZhIAErzJGdw6A9/c7XQ6L14WVk+VhWFoZOE2JYtBrSxskIMDf92XKAjbODZBpF9WkMS5fwdBJ5sYgasB/vvhIqAowmSRMNO5iHi4ShYQg5puQBlPY/g0ZZ/eZaSUAixDQcDi4stoCGtlk4CQDeLfxv+ZL+iQTQmTzjIM/lAgABKhZQknNCa3+IwEhDmfiDCJnk78IKiQUOBPIwYekCDzU6dMvj7EuifyRuh7HnfJOkiV3XRTAHEspwyQvsZq9jUAsRoDiKkABGQ/QmxiOPd57hGG7M1jMmcMxRpUqJRIzAAvmLAJwNMT9pYkDrxhxG5oVgm550bRbWW8IFVHs8daRCBfCiBIJ3y++jBkN4Y3X759Ot8AXJuDJsZgQdCIL8UgMiegP1wazmnLOWdrQeiWWhkFty3TcvlPBbxAnzjsD6OTiBbo4ofRA3/+Q67aGDLXe3Od4eo3hktHwWW9oZIoFXDwSbQ7QuE7tsAnG70FOtlwoU1vuFyLy9RCOo6KRHOvSHQVJCoCxJF3wrZjcm/koWTG3YlRFqzOz+yYRqk8K/BIF3qntI2zI/kNVwxSuamwBmVj4RoV1ovsTs4qbrooC9mdij+sBkIiAVSW4oLMXnx32ZrWetlKC1MDb7kzJxWwlUHzsSG9bZQY42gMsmFrZDxoSg1MNZJUWJXF74tRXZy1S+juofj2CyzjU74n0ek6ZaLJqYUgnxlc0DQqnMEWId0KkNR7TrOjmijX6KQutRTnW01JRw3j3qTDtxv6nV4ltukdXFqGmh+/cmkdUnWM5y2MStSGDc/BPV2cN3B7FnKbVDbT7VQTicMrm6VuDNbKdxLgBxnInRZiunGAksQfc6aimKrk2gAvfW9D7KSnJUC7wwNovlqZVJGmh3B/MtZG7eM+Hk02iyF3VUWjq4kwMuIjw0jXqlrm6tbwpnGkYaxdquE40trCoL9hbH8YG5RAYVtppLZ9plJGl7JQ03mKvR5fB8lTbKOSp7gHz1PUjaNn5c4f4ZztlaI6jjxlg82UVymdo8xTTLXC/cqldUhhbBHHvg5hHHPWYaoR4fVi5JEQ8UKoh8cknJPE15crVpdZPf9uGFNW9dDVWusKretLIp9xSDhjEw4ZWIUVRhLMlqEQbsoiLkbjGaPnr1NB4Quv4Va6LRxdDdfQYM1pKuSxXEUE++xLa+VdaVkjGuNVv9SW1nOy/rX6zrTWsfelpXX4tY1pwjgfqjPNeg6b9U0pqGmXFdTetHbCIdqQQJxnoKLuy1DRtJF+rYq6B1XR57Dt3JSKVhvljkJF1WSnJtzZublkMdqmuQRGb9NcAsNFc0l1eOHJGQfSCb433CaE22dHSlapqK1tpKdNeAFkD+1RlQTDHWjw2tvnNpetenifCaZdlPYTNUXxhYcZejcBbz5lCwjnk9htDZJVylnxDdkTg5hZesXSCbM7WhZypKpCDCWTGPMY/7o0Bk4elyJkG/C1W/b7VoSK4XZ07VnaHX2zMU1Qe3gupMle1TA4Q4kWHD5ldHLPxobVhHjC0crO8qnN6wdESn1C3RQMsqQ3deC6TveBBgeNNTLbG5RIGivBVdXTwX3P1il03xxZbl6Db6Lzr9GCWtagl8q96uA2rajZ1SStulBNRQ2EgpaFYVJTah/Y7uq/J4eaWPFJy3W2WoOSznmFRdI4OxSyED8aJfOSy3yUMy8f0ljlw9vtNvz7jsWJfqjZQFzlw4+z0znTvlp93bs7LceV2jxIZ0abiyufeYFu+26oaYw8H5c63Cf8p3Dv3I/xWH5LxDZiGk01nE56zH5VumF0O2n1Yi998LaaH1/fYzwXb8yLE6CelOgy5NX1DGakWNhfe3Ls6WoZM5ANS4FRSKIpi/lnWOTORUPJ0wJ5B4W4nPx3312ni4jw8NWdYavUP3pd3QmNfdY/0oWPbHPVBgU10w1VscFqtdaezbd7Vqu4xdoxzOy4/jFts5qbbrPaB91mdXQtDMJYzIzMWFx9gCFnhXimeIo1H5cS5ylBnPcXpkJaED433YbgR229VnbWlp1AD9CS/0KALpm0imcsC7FdNz1+K4Kq9dauvSbb1Y3ftBadUsCQRiUlc38tCC358/ZYmDMRCMJ6KBALyJGlx2W+w+dGuOg+xPvf+3RWNNdZIh6KX8GTyUY8VPlBgTzPaU9mm5kjYc9rnTv51XduLNo8EtGZZAL2dxLwkHIGphtHiuUphZxN2WyrarIVi23pTncaOwQTcJn/1h+RUOW/O8m6+B8=</diagram></mxfile>
|
||||
BIN
docs/mpi_diagram/mpi_group_explanation.pdf
Normal file
BIN
docs/mpi_diagram/mpi_group_explanation.pdf
Normal file
Binary file not shown.
BIN
examples/.DS_Store
vendored
Normal file
BIN
examples/.DS_Store
vendored
Normal file
Binary file not shown.
7
examples/clean_examples.sh
Executable file
7
examples/clean_examples.sh
Executable file
@@ -0,0 +1,7 @@
|
||||
rm -r ./*/OUTPUT_FILES
|
||||
rm ./*/*.dat
|
||||
rm ./*/*.h5
|
||||
rm ./*/*.txt
|
||||
rm ./*/params_log.yaml
|
||||
rm -r ./*/OUTPUT_FILES*
|
||||
rm ./*/*/*.h5
|
||||
@@ -0,0 +1,215 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward_noisy.dat # source receiver file path
|
||||
swap_src_rec: true # swap sources and receivers (only applied to regional sources and receivers; teleseismic ones remain unchanged)
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_init_N61_61_61.h5 # path to initial model file
|
||||
# model_1d_name: dummy_model_1d_name # 1D model name used in the teleseismic 2D solver (iasp91, ak135, or user_defined are available), defined in include/1d_model.h
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelizing over sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelizing over the computational domain)
|
||||
nproc_sub: 1 # number of processors for sweep parallelization (parallelizing the fast sweeping method)
|
||||
use_gpu: false # true if use gpu (EXPERIMENTAL)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_inv # path to output directory (default is ./OUTPUT_FILES/)
|
||||
output_source_field: false # True: output the traveltime field and adjoint field of all sources at each iteration. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_kernel: false # True: output sensitivity kernel and kernel density. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_final_model: true # True: output merged final model. This file can be used as the input model for TomoATT. Default: true. File: 'model_final.h5'.
|
||||
output_middle_model: false # True: output merged intermediate models during inversion. This file can be used as the input model for TomoATT. Default: false. File: 'middle_model_step_XXXX.h5'
|
||||
output_in_process: false # True: output at each inv iteration, otherwise, only output step 0, Niter-1, Niter. Default: true. File: 'out_data_sim_group_0'.
|
||||
output_in_process_data: false # True: output src_rec_file at each inv iteration, otherwise, only output step 0, Niter-2, Niter-1. Default: true. File: 'src_rec_file_step_XXXX.dat'
|
||||
single_precision_output: false # True: output results in single precision. Default: false.
|
||||
verbose_output_level: 0 # output internal parameters (to do).
|
||||
output_file_format: 0 # 0: hdf5, 1: ascii
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
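For reference, the HDF5 outputs listed above can be inspected with h5py along these lines (a minimal sketch; the file names and key layout are taken from the comments above, and the iteration tag '0000' is only an example):

import h5py

# final merged model: keys 'vel', 'xi', 'eta' as listed above
with h5py.File("OUTPUT_FILES/OUTPUT_FILES_inv/final_model.h5", "r") as f:
    vel, xi, eta = f["vel"][:], f["xi"][:], f["eta"][:]
    print(vel.shape, xi.shape, eta.shape)

# per-group output: model snapshots and kernels at each iteration
with h5py.File("OUTPUT_FILES/OUTPUT_FILES_inv/out_data_sim_group_0", "r") as f:
    print(list(f["model"].keys()))            # e.g. 'vel_inv_0000', 'Ks_inv_0000', ...
    vel_it0 = f["model"]["vel_inv_0000"][:]   # velocity model at iteration 0000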
|
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 1
|
||||
|
||||
have_tele_data: false # an error is reported if this is set to false but a source outside the study region is used. Default: false.
|
||||
|
||||
###################################################
|
||||
# model update parameters setting #
|
||||
###################################################
|
||||
model_update:
|
||||
max_iterations: 40 # maximum number of inversion iterations
|
||||
optim_method: 0 # optimization method. 0 : grad_descent, 1 : halve-stepping, 2 : lbfgs (EXPERIMENTAL)
|
||||
|
||||
#common parameters for all optim methods
|
||||
step_length: 0.02 # the initial step length of model perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
|
||||
# parameters for optim_method 0 (gradient_descent)
|
||||
optim_method_0:
|
||||
step_method: 1 # the method to modulate step size. 0: according to objective function; 1: according to gradient direction
|
||||
# if step_method:0. If the objective function increases, step size -> step_length * step_length_decay.
|
||||
step_length_decay: 0.9 # default: 0.9
|
||||
# if step_method:1. If the angle between the current and previous gradients is greater than step_length_gradient_angle, step size -> step_length * step_length_change[0].
|
||||
# otherwise, step size -> step length * step_length_change[1].
|
||||
step_length_gradient_angle: 120 # default: 120.0
|
||||
step_length_change: [0.5, 1.2] # default: [0.5,1.2]
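The two step-size rules described above can be sketched as follows (illustrative only; the function and variable names are assumptions, not TomoATT internals):

import numpy as np

def update_step_length(step_length, obj_increased, grad, grad_prev,
                       step_method=1, step_length_decay=0.9,
                       step_length_gradient_angle=120.0, step_length_change=(0.5, 1.2)):
    # step_method 0: shrink the step only when the objective function increases
    if step_method == 0:
        return step_length * step_length_decay if obj_increased else step_length
    # step_method 1: compare the angle between the current and previous gradients
    cos_a = np.dot(grad, grad_prev) / (np.linalg.norm(grad) * np.linalg.norm(grad_prev))
    angle = np.degrees(np.arccos(np.clip(cos_a, -1.0, 1.0)))
    factor = step_length_change[0] if angle > step_length_gradient_angle else step_length_change[1]
    return step_length * factor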
|
||||
# Kdensity_coe is used to rescale the final kernel: kernel -> kernel / pow(density of kernel, Kdensity_coe). If Kdensity_coe > 0, regions with less data are enhanced during the inversion.
|
||||
# e.g., if Kdensity_coe = 0, the kernel remains unchanged; if Kdensity_coe = 1, the kernel is fully normalized. A value of 0.5 or less is recommended if rescaling is really needed.
|
||||
Kdensity_coe: 0 # default: 0.0, limited range: 0.0 - 0.95
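Written out, the rescaling above is simply the following (a sketch of the stated formula; the small eps guard is an addition for illustration):

import numpy as np

def rescale_kernel(kernel, kernel_density, Kdensity_coe=0.0, eps=1e-12):
    # kernel -> kernel / pow(density of kernel, Kdensity_coe)
    # Kdensity_coe = 0 leaves the kernel unchanged; Kdensity_coe = 1 fully normalizes it
    return kernel / np.power(np.abs(kernel_density) + eps, Kdensity_coe)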
|
||||
|
||||
# smoothing
|
||||
smoothing:
|
||||
smooth_method: 0 # 0: multiparametrization, 1: laplacian smoothing (EXPERIMENTAL)
|
||||
l_smooth_rtp: [1, 1, 1] # smoothing coefficients for laplacian smoothing
|
||||
|
||||
# parameters for smooth method 0 (multigrid model parametrization)
|
||||
# inversion grid can be viewed in OUTPUT_FILES/inversion_grid.txt
|
||||
n_inversion_grid: 5 # number of inversion grid sets
|
||||
|
||||
uniform_inv_grid_dep: false # true if use uniform inversion grid for dep, false if use flexible inversion grid
|
||||
uniform_inv_grid_lat: true # true if use uniform inversion grid for lat, false if use flexible inversion grid
|
||||
uniform_inv_grid_lon: true # true if use uniform inversion grid for lon, false if use flexible inversion grid
|
||||
|
||||
# -------------- uniform inversion grid setting --------------
|
||||
# settings for uniform inversion grid
|
||||
n_inv_dep_lat_lon: [12, 9, 9] # number of the base inversion grid points
|
||||
min_max_dep_inv: [-10, 50] # depth in km (Radius of the earth is defined in config.h/R_earth)
|
||||
min_max_lat_inv: [0, 2] # latitude in degree
|
||||
min_max_lon_inv: [0, 2] # longitude in degree
|
||||
|
||||
# -------------- flexible inversion grid setting --------------
|
||||
# settings for flexible inversion grid
|
||||
dep_inv: [-10, 0, 10, 20, 30, 40, 50, 60] # inversion grid for vel in depth (km)
|
||||
lat_inv: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for vel in latitude (degree)
|
||||
lon_inv: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for vel in longitude (degree)
|
||||
trapezoid: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
# Change trapezoid (and trapezoid_ani) carefully, and only if you really want a trapezoidal inversion grid, i.e. one whose spacing increases with depth to account for the poorer data coverage at greater depths.
|
||||
# The trapezoidal inversion grid node with index (i,j,k) in longitude, latitude, and depth is defined as follows (a sketch in code follows the diagram below):
|
||||
# if dep_inv[k] < trapezoid[1], lon = lon_inv[i];
|
||||
# lat = lat_inv[j];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[1] <= dep_inv[k] < trapezoid[2], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[2] <= dep_inv[k], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# The shape of the trapezoid inversion grid (x) looks like:
|
||||
#
|
||||
# lon_inv[0] [1] [2] [3] [4]
|
||||
# |<-------- (lon_inv[end] - lon_inv[0]) ---->|
|
||||
# dep_inv[0] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[1] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[2] = trapezoid[1] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[3] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[4] = trapezoid[2] / x x x x x \
|
||||
# | |
|
||||
# dep_inv[5] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[6] | x x x x x |
|
||||
# |<---- trapezoid[0]* (lon_inv[end] - lon_inv[0]) ------>|
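As a sketch, the piecewise definition above can be transcribed literally as follows (mid_lon_inv and mid_lat_inv are assumed here to be the midpoints of the inversion-grid extent, which the comments do not define):

def trapezoid_node(i, j, k, lon_inv, lat_inv, dep_inv, trapezoid=(1.0, 0.0, 50.0)):
    # literal transcription of the three depth ranges described above
    mid_lon_inv = 0.5 * (lon_inv[0] + lon_inv[-1])
    mid_lat_inv = 0.5 * (lat_inv[0] + lat_inv[-1])
    dep = dep_inv[k]
    if dep < trapezoid[1]:
        lon, lat = lon_inv[i], lat_inv[j]
    elif dep < trapezoid[2]:
        s = (dep - trapezoid[1]) / (trapezoid[2] - trapezoid[1]) * trapezoid[0]
        lon = mid_lon_inv + (lon_inv[i] - mid_lon_inv) * s
        lat = mid_lat_inv + (lat_inv[j] - mid_lat_inv) * s
    else:
        lon = mid_lon_inv + (lon_inv[i] - mid_lon_inv) * trapezoid[0]
        lat = mid_lat_inv + (lat_inv[j] - mid_lat_inv) * trapezoid[0]
    return lon, lat, dep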
|
||||
|
||||
|
||||
# In the following data subsection, XXX_weight means a weight is assigned to the data, influencing the objective function and gradient
|
||||
# XXX_weight : [d1,d2,w1,w2] means:
|
||||
# if XXX < d1, weight = w1
|
||||
# if d1 <= XXX < d2, weight = w1 + (XXX-d1)/(d2-d1)*(w2-w1), (linear interpolation)
|
||||
# if d2 <= XXX , weight = w2
|
||||
# Setting w1 = w2 = 1.0 gives all data equal weight with respect to XXX (see the sketch below).
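The [d1, d2, w1, w2] rule above is a piecewise-linear function of XXX; a minimal sketch (illustrative helper, not part of TomoATT):

def data_weight(x, d1, d2, w1, w2):
    # weight = w1 below d1, w2 above d2, linear interpolation in between
    if x < d1:
        return w1
    if x < d2:
        return w1 + (x - d1) / (d2 - d1) * (w2 - w1)
    return w2

# example: residual_weight: [1, 3, 1, 1] gives weight 1.0 for every residual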
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time: true # 'true' for using absolute traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the absolute traveltime residual (second) = abs(t^{obs}_{n,i} - t^{syn}_{n,j})
|
||||
distance_weight: [100, 200, 1, 1] # XXX is epicenter distance (km) between the source and receiver related to the data
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time: false # 'true' for using common source differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the common source differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{n,j} - t^{syn}_{n,i} + t^{syn}_{n,j}).
|
||||
azimuthal_weight: [15, 30, 1, 1] # XXX is the azimuth difference between two separate stations related to the common source.
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time: false # 'true' for using common receiver differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the common receiver differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{m,i} - t^{syn}_{n,i} + t^{syn}_{m,i})
|
||||
azimuthal_weight: [15, 30, 1, 1] # XXX is the azimuth difference between two separate sources related to the common receiver.
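For clarity, the two differential residuals defined above written out as code (illustrative; n, m index sources, i, j index receivers, and t_obs/t_syn are nested mappings of traveltimes):

def cs_dif_residual(t_obs, t_syn, n, i, j):
    # common-source: one source n, two receivers i and j
    return abs(t_obs[n][i] - t_obs[n][j] - t_syn[n][i] + t_syn[n][j])

def cr_dif_residual(t_obs, t_syn, n, m, i):
    # common-receiver: two sources n and m, one receiver i
    return abs(t_obs[n][i] - t_obs[m][i] - t_syn[n][i] + t_syn[m][i])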
|
||||
|
||||
# -------------- global weight of different types of data (to balance the weight of different data) --------------
|
||||
global_weight:
|
||||
balance_data_weight: false # true: balance by the total weight of each data type. false: use the original weights (the per-type weights below must then be set)
|
||||
abs_time_weight: 1 # weight of absolute traveltime data after balance, default: 1.0
|
||||
cs_dif_time_local_weight: 1 # weight of common source differential traveltime data after balance, default: 1.0
|
||||
cr_dif_time_local_weight: 1 # weight of common receiver differential traveltime data after balance, default: 1.0
|
||||
teleseismic_weight: 1 # weight of teleseismic data after balancing, default: 1.0 (excluded in this version)
|
||||
|
||||
# -------------- inversion parameters --------------
|
||||
update_slowness : true # update slowness (velocity) or not. default: true
|
||||
update_azi_ani : true # update azimuthal anisotropy (xi, eta) or not. default: false
|
||||
@@ -0,0 +1,50 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: 1_src_rec_files/src_rec_config.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_ckb_N61_61_61.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelizing over sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelizing over the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_signal # path to output directory (default is ./OUTPUT_FILES/)
|
||||
output_final_model: true # output merged final model (final_model.h5) or not.
|
||||
output_in_process: false # output model at each inv iteration or not.
|
||||
output_in_process_data: false # output src_rec_file at each inv iteration or not.
|
||||
output_file_format: 0
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
run_mode: 0
|
||||
29
examples/eg1_seismic_tomography/README.md
Normal file
29
examples/eg1_seismic_tomography/README.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# seismic tomography
|
||||
|
||||
This is a toy model to invert traveltimes for Vp and anisotropy (Figure 8c of reference [1]).
|
||||
|
||||
Reference:
|
||||
[1] J. Chen, M. Nagaso, M. Xu, and P. Tong, TomoATT: An open-source package for Eikonal equation-based adjoint-state traveltime tomography for seismic velocity and azimuthal anisotropy, submitted.
|
||||
https://doi.org/10.48550/arXiv.2412.00031
|
||||
|
||||
The following Python modules are required to set up the inversion and to plot the final results:
|
||||
- h5py
|
||||
- PyTomoATT
|
||||
- PyGMT
|
||||
- gmt
|
||||
|
||||
Run this example:
|
||||
|
||||
1. Run `bash run_this_example.sh` to execute the test.
|
||||
|
||||
2. After the inversion finishes, run `python plot_output.py` to plot the results.
|
||||
|
||||
The initial and true models:
|
||||
|
||||

|
||||
|
||||
The inversion result:
|
||||
|
||||

|
||||
|
||||
|
||||
16
examples/eg1_seismic_tomography/assign_gaussian_noise.py
Normal file
16
examples/eg1_seismic_tomography/assign_gaussian_noise.py
Normal file
@@ -0,0 +1,16 @@
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
|
||||
def assign_noise_to_src_rec_file(in_fname, out_fname, noise_level=0.1):
|
||||
sr = SrcRec.read(in_fname)
|
||||
sr.add_noise(noise_level)
|
||||
sr.write(out_fname)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
in_fname = "OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward.dat" # input source receiver file
|
||||
out_fname = "OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward_noisy.dat" # output source receiver file
|
||||
sigma = 0.1 # noise level in seconds
|
||||
assign_noise_to_src_rec_file(in_fname, out_fname, noise_level=sigma)
|
||||
|
||||
|
||||
|
||||
250
examples/eg1_seismic_tomography/plot_output.py
Normal file
250
examples/eg1_seismic_tomography/plot_output.py
Normal file
@@ -0,0 +1,250 @@
|
||||
# %%
|
||||
import pygmt
|
||||
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")
|
||||
|
||||
import os
|
||||
|
||||
# %%
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.data import ATTData
|
||||
import numpy as np
|
||||
|
||||
# %%
|
||||
# read models
|
||||
|
||||
Ngrid = [61,61,61]
|
||||
data_file = '2_models/model_init_N%d_%d_%d.h5'%(Ngrid[0],Ngrid[1],Ngrid[2])
|
||||
par_file = '3_input_params/input_params_signal.yaml'
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
initial_model = model.to_xarray()
|
||||
|
||||
data_file = '2_models/model_ckb_N%d_%d_%d.h5'%(Ngrid[0],Ngrid[1],Ngrid[2])
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
ckb_model = model.to_xarray()
|
||||
|
||||
# initial model
|
||||
depth = 10.0
|
||||
vel_init = initial_model.interp_dep(depth, field='vel')
|
||||
start = [1.25,0]; end = [1.25,2]
|
||||
vel_init_sec = initial_model.interp_sec(start, end, field='vel', val = 1)
|
||||
|
||||
# checkerboard model
|
||||
vel_ckb = ckb_model.interp_dep(depth, field='vel') # lon = [:,0], lat = [:,1], vel = [:,2]
|
||||
vel_ckb_sec = ckb_model.interp_sec(start, end, field='vel', val = 1)
|
||||
|
||||
# anisotropic arrow
|
||||
samp_interval = 3
|
||||
length = 7
|
||||
width = 0.1
|
||||
ani_thd = 0.02
|
||||
|
||||
ani_ckb_phi = ckb_model.interp_dep(depth, field='phi', samp_interval=samp_interval)
|
||||
ani_ckb_epsilon = ckb_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval)
|
||||
ani_ckb = np.hstack([ani_ckb_phi, ani_ckb_epsilon[:,2].reshape(-1, 1)*length, np.ones((ani_ckb_epsilon.shape[0],1))*width]) # lon, lat, angle, length, width
|
||||
idx = np.where(ani_ckb_epsilon[:,2] > ani_thd)
|
||||
ani_ckb = ani_ckb[idx[0],:]
|
||||
|
||||
try:
|
||||
os.mkdir('img')
|
||||
except FileExistsError:  # the 'img' directory already exists
|
||||
pass
|
||||
|
||||
# %%
|
||||
# read src_rec_file for data
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
|
||||
sr = SrcRec.read('1_src_rec_files/src_rec_config.dat')
|
||||
station = sr.receivers[['stlo','stla','stel']].values.T
|
||||
true_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
earthquake = true_loc
|
||||
|
||||
# %%
|
||||
# categorize earthquakes
|
||||
ev_idx1 = []
|
||||
ev_idx2 = []
|
||||
ev_idx3 = []
|
||||
for i in range(earthquake.shape[1]):
|
||||
dep = earthquake[2,i]
|
||||
if dep < 15:
|
||||
ev_idx1.append(i)
|
||||
elif dep < 25:
|
||||
ev_idx2.append(i)
|
||||
elif dep < 35:
|
||||
ev_idx3.append(i)
|
||||
|
||||
# %%
|
||||
# plot the checkerboard model
|
||||
fig = pygmt.Figure()
|
||||
|
||||
region = [0,2,0,2]
|
||||
frame = ["xa1","ya1","nSWe"]
|
||||
projection = "M10c"
|
||||
spacing = 0.04
|
||||
|
||||
vel_range = 20
|
||||
|
||||
# -------------- initial model and earthquake location --------------
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tInitial model and locations"], projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
x = vel_init[:,0]; y = vel_init[:,1]; value = (vel_init[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# earthquakes
|
||||
fig.plot(x = true_loc[0,ev_idx1], y = true_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = true_loc[0,ev_idx2], y = true_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = true_loc[0,ev_idx3], y = true_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
fig.plot(x = station[0,:], y = station[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# # anisotropic arrow
|
||||
# fig.plot(ani_ckb, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
x = vel_init_sec[:,3]; y = vel_init_sec[:,1]; value = (vel_init_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# earthquakes
|
||||
fig.plot(x = true_loc[2,ev_idx1], y = true_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = true_loc[2,ev_idx2], y = true_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = true_loc[2,ev_idx3], y = true_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
|
||||
fig.shift_origin(xshift=4)
|
||||
|
||||
|
||||
|
||||
|
||||
# -------------- checkerboard model --------------
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tTrue model and locations"], projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
x = vel_ckb[:,0]; y = vel_ckb[:,1]; value = (vel_ckb[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# earthquakes
|
||||
fig.plot(x = earthquake[0,ev_idx1], y = earthquake[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = earthquake[0,ev_idx2], y = earthquake[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = earthquake[0,ev_idx3], y = earthquake[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
# fig.plot(x = loc_st[0,:], y = loc_st[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# anisotropic arrow
|
||||
fig.plot(ani_ckb, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
x = vel_ckb_sec[:,3]; y = vel_ckb_sec[:,1]; value = (vel_ckb_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# earthquakes
|
||||
fig.plot(x = earthquake[2,ev_idx1], y = earthquake[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = earthquake[2,ev_idx2], y = earthquake[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = earthquake[2,ev_idx3], y = earthquake[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
|
||||
# ------------------- colorbar -------------------
|
||||
fig.shift_origin(xshift=-11, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a%f"%(vel_range),"x+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.shift_origin(xshift=6, yshift=-1)
|
||||
fig.basemap(region=[0,1,0,1], frame=["wesn"], projection="X6c/1.5c")
|
||||
ani = [
|
||||
[0.2, 0.6, 45, 0.02*length, width], # lon, lat, phi, epsilon, size
|
||||
[0.5, 0.6, 45, 0.05*length, width],
|
||||
[0.8, 0.6, 45, 0.10*length, width],
|
||||
]
|
||||
fig.plot(ani, style='j', fill='yellow1', pen='0.5p,black')
|
||||
fig.text(text=["0.02", "0.05", "0.10"], x=[0.2,0.5,0.8], y=[0.2]*3, font="16p,Helvetica", justify="CM")
|
||||
fig.shift_origin(xshift= 11, yshift=2.5)
|
||||
|
||||
fig.show()
|
||||
fig.savefig('img/model_setting.png', dpi=300)
|
||||
|
||||
# %%
|
||||
# plot the inversion result
|
||||
|
||||
# read models
|
||||
tag = "inv"
|
||||
data_file = "OUTPUT_FILES/OUTPUT_FILES_%s/final_model.h5"%(tag)
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
inv_model = model.to_xarray()
|
||||
vel_inv = inv_model.interp_dep(depth, field='vel') # lon = [:,0], lat = [:,1], vel = [:,2]
|
||||
x = vel_inv[:,0]; y = vel_inv[:,1]; value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
vel_inv_sec = inv_model.interp_sec(start, end, field='vel', val = 1)
|
||||
x_sec = vel_inv_sec[:,3]; y_sec = vel_inv_sec[:,1]; value_sec = (vel_inv_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
|
||||
ani_inv_phi = inv_model.interp_dep(depth, field='phi', samp_interval=samp_interval)
|
||||
ani_inv_epsilon = inv_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval)
|
||||
ani_inv = np.hstack([ani_inv_phi, ani_inv_epsilon[:,2].reshape(-1, 1)*length, np.ones((ani_inv_epsilon.shape[0],1))*width]) # lon, lat, angle, length, width
|
||||
idx = np.where(ani_inv_epsilon[:,2] > ani_thd)
|
||||
ani_inv = ani_inv[idx[0],:]
|
||||
|
||||
# plot the inversion result
|
||||
|
||||
fig = pygmt.Figure()
|
||||
|
||||
region = [0,2,0,2]
|
||||
frame = ["xa1","ya1","+tInversion results"]
|
||||
projection = "M10c"
|
||||
spacing = 0.04
|
||||
|
||||
vel_range = 20
|
||||
|
||||
# -------------- checkerboard model --------------
|
||||
fig.basemap(region=region, frame=frame, projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
x = vel_inv[:,0]; y = vel_inv[:,1]; value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# # earthquakes
|
||||
# fig.plot(x = earthquake[0,ev_idx1], y = earthquake[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
# fig.plot(x = earthquake[0,ev_idx2], y = earthquake[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
# fig.plot(x = earthquake[0,ev_idx3], y = earthquake[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
# fig.plot(x = loc_st[0,:], y = loc_st[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# anisotropic arrow
|
||||
fig.plot(ani_inv, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
x = vel_inv_sec[:,3]; y = vel_inv_sec[:,1]; value = (vel_inv_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# # earthquakes
|
||||
# fig.plot(x = earthquake[2,ev_idx1], y = earthquake[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
# fig.plot(x = earthquake[2,ev_idx2], y = earthquake[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
# fig.plot(x = earthquake[2,ev_idx3], y = earthquake[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# ------------------- colorbar -------------------
|
||||
fig.shift_origin(xshift=-11, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a%f"%(vel_range),"x+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.shift_origin(xshift=6, yshift=-1)
|
||||
fig.basemap(region=[0,1,0,1], frame=["wesn"], projection="X6c/1.5c")
|
||||
ani = [
|
||||
[0.2, 0.6, 45, 0.02*length, width], # lon, lat, phi, epsilon, size
|
||||
[0.5, 0.6, 45, 0.05*length, width],
|
||||
[0.8, 0.6, 45, 0.10*length, width],
|
||||
]
|
||||
fig.plot(ani, style='j', fill='yellow1', pen='0.5p,black')
|
||||
fig.text(text=["0.02", "0.05", "0.10"], x=[0.2,0.5,0.8], y=[0.2]*3, font="16p,Helvetica", justify="CM")
|
||||
fig.shift_origin(xshift= 11, yshift=2.5)
|
||||
|
||||
|
||||
fig.show()
|
||||
fig.savefig('img/model_%s.png'%(tag), dpi=300)
|
||||
|
||||
|
||||
63
examples/eg1_seismic_tomography/prepare_input_files.py
Normal file
63
examples/eg1_seismic_tomography/prepare_input_files.py
Normal file
@@ -0,0 +1,63 @@
|
||||
# download src_ref_files from Zenodo
|
||||
import os
|
||||
import numpy as np
|
||||
import sys
|
||||
try:
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.checkerboard import Checker
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
except ImportError:
|
||||
print("ERROR: ATTModel not found. Please install pytomoatt first."
|
||||
"See https://tomoatt.github.io/PyTomoATT/installation.html for details.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
class BuildInitialModel():
|
||||
def __init__(self, par_file="./3_input_params/input_params_signal.yaml", output_dir="2_models"):
|
||||
"""
|
||||
Build initial model for tomography inversion
|
||||
"""
|
||||
self.am = ATTModel(par_file)
|
||||
self.output_dir = output_dir
|
||||
|
||||
def build_initial_model(self, vel_min=5.0, vel_max=8.0):
|
||||
"""
|
||||
Build initial model for tomography inversion
|
||||
"""
|
||||
self.am.vel[self.am.depths < 0, :, :] = vel_min
|
||||
idx = np.where((0 <= self.am.depths) & (self.am.depths < 40.0))[0]
|
||||
self.am.vel[idx, :, :] = np.linspace(vel_min, vel_max, idx.size)[::-1][:, np.newaxis, np.newaxis]
|
||||
self.am.vel[self.am.depths >= 40.0, :, :] = vel_max
|
||||
|
||||
|
||||
def build_ckb_model(output_dir="2_models"):
|
||||
cbk = Checker(f'{output_dir}/model_init_N61_61_61.h5', para_fname="./3_input_params/input_params_signal.yaml")
|
||||
cbk.checkerboard(
|
||||
n_pert_x=2, n_pert_y=2, n_pert_z=2,
|
||||
pert_vel=0.2, pert_ani=0.1, ani_dir=60.0,
|
||||
lim_x=[0.5, 1.5], lim_y=[0.5, 1.5], lim_z=[0, 40]
|
||||
)
|
||||
cbk.write(f'{output_dir}/model_ckb_N61_61_61.h5')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# download src_rec_config.dat
|
||||
url = 'https://zenodo.org/records/14053821/files/src_rec_config.dat'
|
||||
path = "1_src_rec_files/src_rec_config.dat"
|
||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||
if not os.path.exists(path):
|
||||
sr = SrcRec.read(url)
|
||||
sr.write(path)
|
||||
|
||||
# build initial model
|
||||
output_dir = "2_models"
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
bim = BuildInitialModel(output_dir=output_dir)
|
||||
bim.build_initial_model()
|
||||
bim.am.write('{}/model_init_N{:d}_{:d}_{:d}.h5'.format(bim.output_dir, *bim.am.n_rtp))
|
||||
|
||||
# build ckb model
|
||||
build_ckb_model(output_dir)
|
||||
|
||||
|
||||
|
||||
29
examples/eg1_seismic_tomography/run_this_example.sh
Normal file
29
examples/eg1_seismic_tomography/run_this_example.sh
Normal file
@@ -0,0 +1,29 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
# Step 1: Generate necessary input files
|
||||
python prepare_input_files.py
|
||||
|
||||
# Step 2: Run forward modeling
|
||||
# for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
# # for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
|
||||
|
||||
# Step 3: Assign data noise to the observational data
|
||||
python assign_gaussian_noise.py
|
||||
|
||||
# Step 4: Do inversion
|
||||
# for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_inv.yaml
|
||||
# # for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_inv.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_inv.yaml
|
||||
|
||||
|
||||
# Step 5 (Optional): Plot the results
|
||||
python plot_output.py
|
||||
@@ -0,0 +1,114 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward_errloc.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_ckb_N61_61_61.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelizing over sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelizing over the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_loc # path to output directory (default is ./OUTPUT_FILES/)
|
||||
output_final_model: true # output merged final model (final_model.h5) or not.
|
||||
output_in_process: false # output model at each inv iteration or not.
|
||||
output_in_process_data: false # output src_rec_file at each inv iteration or not.
|
||||
output_file_format: 0
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 2
|
||||
|
||||
#################################################
|
||||
# relocation parameters setting #
|
||||
#################################################
|
||||
relocation: # update earthquake hypocenter and origin time (when run_mode : 2 and 3)
|
||||
min_Ndata: 4 # if the number of data for an earthquake is less than <min_Ndata>, the earthquake will not be relocated. default value: 4
|
||||
|
||||
# relocation_strategy
|
||||
step_length : 0.01 # initial step length of relocation perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
step_length_decay : 0.9 # if the objective function increases, step size -> step_length * step_length_decay. default: 0.9
|
||||
rescaling_dep_lat_lon_ortime: [10.0, 15.0, 15.0, 1.0] # the perturbation is rescaled by <rescaling_dep_lat_lon_ortime>. Unit: km, km, km, second
|
||||
max_change_dep_lat_lon_ortime: [10.0, 15.0, 15.0, 1.0] # the changes of dep, lat, lon, and ortime do not exceed max_change. Unit: km, km, km, second
|
||||
max_iterations : 201 # maximum number of iterations for relocation
|
||||
tol_gradient : 0.0001 # if the norm of the gradient is smaller than this tolerance, the relocation iteration terminates
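A sketch of how the rescaling and clipping parameters above could interact (illustrative only; the actual update rule inside TomoATT may differ):

import numpy as np

def clip_relocation_update(delta, total_change,
                           rescaling=(10.0, 15.0, 15.0, 1.0),
                           max_change=(10.0, 15.0, 15.0, 1.0)):
    # delta: raw perturbation of (dep, lat, lon, ortime); total_change: accumulated change so far
    rescaling, max_change = np.asarray(rescaling), np.asarray(max_change)
    scaled = np.asarray(delta) * rescaling                               # rescale the perturbation
    new_total = np.clip(total_change + scaled, -max_change, max_change)  # enforce the maximum change
    return new_total - total_change, new_total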
|
||||
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time : true # 'true' for using absolute traveltime data to update origin time and location; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time : false # 'true' for using common source differential traveltime data to update origin time and location; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time : false # 'true' for using common receiver differential traveltime data to update origin time and location; 'false' for not using (no need to set parameters in this section)
|
||||
@@ -0,0 +1,50 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: 1_src_rec_files/src_rec_config.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_ckb_N61_61_61.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelizing over sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelizing over the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_signal # path to output directory (default is ./OUTPUT_FILES/)
|
||||
output_final_model: true # output merged final model (final_model.h5) or not.
|
||||
output_in_process: false # output model at each inv iteration or not.
|
||||
output_in_process_data: false # output src_rec_file at each inv iteration or not.
|
||||
output_file_format: 0
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
run_mode: 0
|
||||
29
examples/eg2_earthquake_location/README.md
Normal file
29
examples/eg2_earthquake_location/README.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# earthquake location
|
||||
|
||||
This is a toy model to invert traveltimes for locating earthquakes (Figure 8a of reference [1]).
|
||||
|
||||
Reference:
|
||||
[1] J. Chen, M. Nagaso, M. Xu, and P. Tong, TomoATT: An open-source package for Eikonal equation-based adjoint-state traveltime tomography for seismic velocity and azimuthal anisotropy, submitted.
|
||||
https://doi.org/10.48550/arXiv.2412.00031
|
||||
|
||||
The following Python modules are required to set up the relocation and to plot the final results:
|
||||
- h5py
|
||||
- PyTomoATT
|
||||
- PyGMT
|
||||
- gmt
|
||||
|
||||
Run this example:
|
||||
|
||||
1. Run `bash run_this_example.sh` to execute the test.
|
||||
|
||||
2. After the relocation finishes, run `python plot_output.py` to plot the results.
|
||||
|
||||
The initial and true models:
|
||||
|
||||

|
||||
|
||||
The location results:
|
||||
|
||||

|
||||
|
||||
|
||||
30
examples/eg2_earthquake_location/assign_gaussian_noise.py
Normal file
30
examples/eg2_earthquake_location/assign_gaussian_noise.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
|
||||
class AssignNoise:
|
||||
def __init__(self, in_fname, out_fname):
|
||||
self.in_fname = in_fname
|
||||
self.out_fname = out_fname
|
||||
self.sr = SrcRec.read(self.in_fname)
|
||||
|
||||
def assign_noise_for_tt(self, noise_level=0.1):
|
||||
self.sr.add_noise(noise_level)
|
||||
|
||||
def assign_noise_for_src(self, lat_pert=0.1, lon_pert=0.1, dep_pert=10, tau_pert=0.5):
|
||||
self.sr.add_noise_to_source(lat_pert, lon_pert, dep_pert, tau_pert)
|
||||
|
||||
if __name__ == "__main__":
|
||||
in_fname = "OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward.dat" # input source receiver file
|
||||
out_fname = "OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward_errloc.dat" # output source receiver file
|
||||
sigma = 0.1 # noise level in seconds
|
||||
lat_pert = 0.1 # assign noise for latitude in degrees
|
||||
lon_pert = 0.1 # assign noise for longitude in degrees
|
||||
dep_pert = 10 # assign noise for depth in km
|
||||
tau_pert = 0.5 # assign noise for origin time in seconds
|
||||
# Initialize the instance
|
||||
an = AssignNoise(in_fname, out_fname)
|
||||
# Assign noise for travel time
|
||||
an.assign_noise_for_tt(sigma)
|
||||
# Assign noise for source
|
||||
an.assign_noise_for_src(lat_pert, lon_pert, dep_pert, tau_pert)
|
||||
# Write the output file
|
||||
an.sr.write(out_fname)
|
||||
254
examples/eg2_earthquake_location/plot_output.py
Normal file
254
examples/eg2_earthquake_location/plot_output.py
Normal file
@@ -0,0 +1,254 @@
|
||||
# %%
|
||||
import pygmt
|
||||
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")
|
||||
|
||||
import os
|
||||
|
||||
# %%
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.data import ATTData
|
||||
import numpy as np
|
||||
|
||||
# %%
|
||||
# read models
|
||||
|
||||
Ngrid = [61,61,61]
|
||||
data_file = '2_models/model_init_N%d_%d_%d.h5'%(Ngrid[0],Ngrid[1],Ngrid[2])
|
||||
par_file = '3_input_params/input_params_signal.yaml'
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
initial_model = model.to_xarray()
|
||||
|
||||
data_file = '2_models/model_ckb_N%d_%d_%d.h5'%(Ngrid[0],Ngrid[1],Ngrid[2])
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
ckb_model = model.to_xarray()
|
||||
|
||||
# initial model
|
||||
depth = 10.0
|
||||
vel_init = initial_model.interp_dep(depth, field='vel')
|
||||
start = [1.25,0]; end = [1.25,2]
|
||||
vel_init_sec = initial_model.interp_sec(start, end, field='vel', val = 1)
|
||||
|
||||
# checkerboard model
|
||||
vel_ckb = ckb_model.interp_dep(depth, field='vel') # lon = [:,0], lat = [:,1], vel = [:,2]
|
||||
vel_ckb_sec = ckb_model.interp_sec(start, end, field='vel', val = 1)
|
||||
|
||||
# anisotropic arrow
|
||||
samp_interval = 3
|
||||
length = 7
|
||||
width = 0.1
|
||||
ani_thd = 0.02
|
||||
|
||||
ani_ckb_phi = ckb_model.interp_dep(depth, field='phi', samp_interval=samp_interval)
|
||||
ani_ckb_epsilon = ckb_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval)
|
||||
ani_ckb = np.hstack([ani_ckb_phi, ani_ckb_epsilon[:,2].reshape(-1, 1)*length, np.ones((ani_ckb_epsilon.shape[0],1))*width]) # lon, lat, angle, length, width
|
||||
idx = np.where(ani_ckb_epsilon[:,2] > ani_thd)
|
||||
ani_ckb = ani_ckb[idx[0],:]
|
||||
|
||||
try:
|
||||
os.mkdir('img')
|
||||
except FileExistsError:  # the 'img' directory already exists
|
||||
pass
|
||||
|
||||
# %%
|
||||
# read src_rec_file for data
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
|
||||
sr = SrcRec.read('1_src_rec_files/src_rec_config.dat')
|
||||
station = sr.receivers[['stlo','stla','stel']].values.T
|
||||
true_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
earthquake = true_loc
|
||||
|
||||
sr = SrcRec.read('OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward_errloc.dat')
|
||||
init_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
|
||||
# %%
|
||||
# categorize earthquakes
|
||||
ev_idx1 = []
|
||||
ev_idx2 = []
|
||||
ev_idx3 = []
|
||||
for i in range(earthquake.shape[1]):
|
||||
dep = earthquake[2,i]
|
||||
if dep < 15:
|
||||
ev_idx1.append(i)
|
||||
elif dep < 25:
|
||||
ev_idx2.append(i)
|
||||
elif dep < 35:
|
||||
ev_idx3.append(i)
|
||||
|
||||
# %%
|
||||
# plot the model setting
|
||||
fig = pygmt.Figure()
|
||||
|
||||
region = [0,2,0,2]
|
||||
frame = ["xa1","ya1"]
|
||||
projection = "M10c"
|
||||
spacing = 0.04
|
||||
|
||||
vel_range = 20
|
||||
|
||||
# -------------- initial model and earthquake location --------------
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tInitial model and locations"], projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
x = vel_ckb[:,0]; y = vel_ckb[:,1]; value = (vel_ckb[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# earthquakes
|
||||
fig.plot(x = init_loc[0,ev_idx1], y = init_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = init_loc[0,ev_idx2], y = init_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = init_loc[0,ev_idx3], y = init_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
fig.plot(x = station[0,:], y = station[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# anisotropic arrow
|
||||
fig.plot(ani_ckb, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
x = vel_ckb_sec[:,3]; y = vel_ckb_sec[:,1]; value = (vel_ckb_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# earthquakes
|
||||
fig.plot(x = init_loc[2,ev_idx1], y = init_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = init_loc[2,ev_idx2], y = init_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = init_loc[2,ev_idx3], y = init_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
|
||||
fig.shift_origin(xshift=4)
|
||||
|
||||
|
||||
# -------------- true model and earthquake location --------------
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tTrue model and locations"], projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
x = vel_ckb[:,0]; y = vel_ckb[:,1]; value = (vel_ckb[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# earthquakes
|
||||
fig.plot(x = earthquake[0,ev_idx1], y = earthquake[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = earthquake[0,ev_idx2], y = earthquake[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = earthquake[0,ev_idx3], y = earthquake[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
# fig.plot(x = loc_st[0,:], y = loc_st[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# anisotropic arrow
|
||||
fig.plot(ani_ckb, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
x = vel_ckb_sec[:,3]; y = vel_ckb_sec[:,1]; value = (vel_ckb_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# earthquakes
|
||||
fig.plot(x = earthquake[2,ev_idx1], y = earthquake[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = earthquake[2,ev_idx2], y = earthquake[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = earthquake[2,ev_idx3], y = earthquake[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
|
||||
# ------------------- colorbar -------------------
|
||||
fig.shift_origin(xshift=-11, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a%f"%(vel_range),"x+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.shift_origin(xshift=6, yshift=-1)
|
||||
fig.basemap(region=[0,1,0,1], frame=["wesn"], projection="X6c/1.5c")
|
||||
ani = [
|
||||
[0.2, 0.6, 45, 0.02*length, width], # lon, lat, phi, epsilon, size
|
||||
[0.5, 0.6, 45, 0.05*length, width],
|
||||
[0.8, 0.6, 45, 0.10*length, width],
|
||||
]
|
||||
fig.plot(ani, style='j', fill='yellow1', pen='0.5p,black')
|
||||
fig.text(text=["0.02", "0.05", "0.10"], x=[0.2,0.5,0.8], y=[0.2]*3, font="16p,Helvetica", justify="CM")
|
||||
fig.shift_origin(xshift= 11, yshift=2.5)
|
||||
|
||||
fig.show()
|
||||
fig.savefig('img/model_setting.png', dpi=300)
|
||||
|
||||
# %%
|
||||
# plot the location result
|
||||
|
||||
# read models
|
||||
tag = "loc"
|
||||
data_file = "OUTPUT_FILES/OUTPUT_FILES_%s/final_model.h5"%(tag)
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
inv_model = model.to_xarray()
|
||||
vel_inv = inv_model.interp_dep(depth, field='vel') # lon = [:,0], lat = [:,1], vel = [:,2]
|
||||
x = vel_inv[:,0]; y = vel_inv[:,1]; value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
vel_inv_sec = inv_model.interp_sec(start, end, field='vel', val = 1)
|
||||
x_sec = vel_inv_sec[:,3]; y_sec = vel_inv_sec[:,1]; value_sec = (vel_inv_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
|
||||
ani_inv_phi = inv_model.interp_dep(depth, field='phi', samp_interval=samp_interval)
|
||||
ani_inv_epsilon = inv_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval)
|
||||
ani_inv = np.hstack([ani_inv_phi, ani_inv_epsilon[:,2].reshape(-1, 1)*length, np.ones((ani_inv_epsilon.shape[0],1))*width]) # lon, lat, angle, length, width
|
||||
idx = np.where(ani_inv_epsilon[:,2] > ani_thd)
|
||||
ani_inv = ani_inv[idx[0],:]
|
||||
|
||||
sr = SrcRec.read('OUTPUT_FILES/OUTPUT_FILES_loc/src_rec_file_reloc_0201.dat')
|
||||
re_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
|
||||
# plot the inversion result
|
||||
|
||||
fig = pygmt.Figure()
|
||||
|
||||
region = [0,2,0,2]
|
||||
frame = ["xa1","ya1","+tLocation results"]
|
||||
projection = "M10c"
|
||||
spacing = 0.04
|
||||
|
||||
vel_range = 20
|
||||
|
||||
# -------------- checkerboard model --------------
|
||||
fig.basemap(region=region, frame=frame, projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
x = vel_inv[:,0]; y = vel_inv[:,1]; value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# earthquakes
|
||||
fig.plot(x = re_loc[0,ev_idx1], y = re_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = re_loc[0,ev_idx2], y = re_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = re_loc[0,ev_idx3], y = re_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
# fig.plot(x = loc_st[0,:], y = loc_st[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# anisotropic arrow
|
||||
fig.plot(ani_inv, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
x = vel_inv_sec[:,3]; y = vel_inv_sec[:,1]; value = (vel_inv_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# earthquakes
|
||||
fig.plot(x = re_loc[2,ev_idx1], y = re_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = re_loc[2,ev_idx2], y = re_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = re_loc[2,ev_idx3], y = re_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# ------------------- colorbar -------------------
|
||||
fig.shift_origin(xshift=-11, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a%f"%(vel_range),"x+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.shift_origin(xshift=6, yshift=-1)
|
||||
fig.basemap(region=[0,1,0,1], frame=["wesn"], projection="X6c/1.5c")
|
||||
ani = [
|
||||
[0.2, 0.6, 45, 0.02*length, width], # lon, lat, phi, epsilon, size
|
||||
[0.5, 0.6, 45, 0.05*length, width],
|
||||
[0.8, 0.6, 45, 0.10*length, width],
|
||||
]
|
||||
fig.plot(ani, style='j', fill='yellow1', pen='0.5p,black')
|
||||
fig.text(text=["0.02", "0.05", "0.10"], x=[0.2,0.5,0.8], y=[0.2]*3, font="16p,Helvetica", justify="CM")
|
||||
fig.shift_origin(xshift= 11, yshift=2.5)
|
||||
|
||||
|
||||
fig.show()
|
||||
fig.savefig('img/model_%s.png'%(tag), dpi=300)
|
||||
|
||||
|
||||
61
examples/eg2_earthquake_location/prepare_input_files.py
Normal file
@@ -0,0 +1,61 @@
|
||||
import numpy as np
|
||||
import os
|
||||
import sys
|
||||
try:
|
||||
from pytomoatt.checkerboard import Checker
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
from pytomoatt.model import ATTModel
|
||||
except ImportError:
|
||||
print("ERROR: ATTModel not found. Please install pytomoatt first."
|
||||
"See https://tomoatt.github.io/PyTomoATT/installation.html for details.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
class BuildInitialModel():
|
||||
def __init__(self, par_file="./3_input_params/input_params_signal.yaml", output_dir="2_models"):
|
||||
"""
|
||||
Build initial model for tomography inversion
|
||||
"""
|
||||
self.am = ATTModel(par_file)
|
||||
self.output_dir = output_dir
|
||||
|
||||
def build_initial_model(self, vel_min=5.0, vel_max=8.0):
|
||||
"""
|
||||
Build initial model for tomography inversion
|
||||
"""
|
||||
# above the free surface (negative depths): constant vel_min
self.am.vel[self.am.depths < 0, :, :] = vel_min
# 0-40 km depth: linear gradient between vel_min and vel_max
idx = np.where((0 <= self.am.depths) & (self.am.depths < 40.0))[0]
self.am.vel[idx, :, :] = np.linspace(vel_min, vel_max, idx.size)[::-1][:, np.newaxis, np.newaxis]
# below 40 km depth: constant vel_max
self.am.vel[self.am.depths >= 40.0, :, :] = vel_max
|
||||
|
||||
|
||||
def build_ckb_model(output_dir="2_models"):
|
||||
cbk = Checker(f'{output_dir}/model_init_N61_61_61.h5', para_fname="./3_input_params/input_params_signal.yaml")
|
||||
cbk.checkerboard(
|
||||
n_pert_x=2, n_pert_y=2, n_pert_z=2,
|
||||
pert_vel=0.2, pert_ani=0.1, ani_dir=60.0,
|
||||
lim_x=[0.5, 1.5], lim_y=[0.5, 1.5], lim_z=[0, 40]
|
||||
)
|
||||
cbk.write(f'{output_dir}/model_ckb_N61_61_61.h5')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# download src_rec_file
|
||||
url = 'https://zenodo.org/records/14053821/files/src_rec_config.dat'
|
||||
path = "1_src_rec_files/src_rec_config.dat"
|
||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||
if not os.path.exists(path):
|
||||
sr = SrcRec.read(url)
|
||||
sr.write(path)
|
||||
|
||||
# build initial model
|
||||
output_dir = "2_models"
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
|
||||
bim = BuildInitialModel(output_dir=output_dir)
|
||||
bim.build_initial_model()
|
||||
bim.am.write('{}/model_init_N{:d}_{:d}_{:d}.h5'.format(bim.output_dir, *bim.am.n_rtp))
|
||||
|
||||
build_ckb_model(output_dir)
|
||||
|
||||
|
||||
27
examples/eg2_earthquake_location/run_this_example.sh
Normal file
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
# Step 1: Generate necessary input files
|
||||
python prepare_input_files.py
|
||||
|
||||
# Step 2: Run forward modeling
|
||||
# # for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
# # for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
|
||||
# Step 3: Assign data noise and location perturbation to the observational data
|
||||
python assign_gaussian_noise.py
|
||||
|
||||
# Step 4: Do relocation
|
||||
# # for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_loc.yaml
|
||||
# # for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_loc.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_loc.yaml
|
||||
|
||||
# Step 5 (Optional): Plot the results
|
||||
# python plot_output.py
|
||||
@@ -0,0 +1,114 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward_errloc.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_init_N61_61_61.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelize over the sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelize the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_joint_step1 # path to the output directory (default is ./OUTPUT_FILES/)
|
||||
output_final_model: true # output merged final model (final_model.h5) or not.
|
||||
output_in_process: false # output model at each inv iteration or not.
|
||||
output_in_process_data: false # output src_rec_file at each inv iteration or not.
|
||||
output_file_format: 0
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 2
|
||||
|
||||
#################################################
|
||||
# relocation parameters setting #
|
||||
#################################################
|
||||
relocation: # update earthquake hypocenter and origin time (when run_mode : 2 and 3)
|
||||
min_Ndata: 4 # if the number of data for an earthquake is less than <min_Ndata>, the earthquake will not be relocated. default value: 4
|
||||
|
||||
# relocation_strategy
|
||||
step_length : 0.01 # initial step length of relocation perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
step_length_decay : 0.9 # if the objective function increases, step size -> step_length * step_length_decay. default: 0.9
|
||||
rescaling_dep_lat_lon_ortime: [10.0, 15.0, 15.0, 1.0] # The perturbation is related to <rescaling_dep_lat_lon_ortime>. Unit: km,km,km,second
|
||||
max_change_dep_lat_lon_ortime: [10.0, 15.0, 15.0, 1.0] # the change of dep,lat,lon,ortime do not exceed max_change. Unit: km,km,km,second
|
||||
max_iterations : 50 # maximum number of iterations for relocation
|
||||
tol_gradient : 0.0001 # if the norm of gradient is smaller than the tolerance, the iteration of relocation terminates
|
||||
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time : true # 'yes' for using absolute traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time : false # 'yes' for using common source differential traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time : true # 'yes' for using common receiver differential traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
@@ -0,0 +1,312 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: OUTPUT_FILES/OUTPUT_FILES_joint_step1/src_rec_file_reloc_0050_obs.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver (only applied to regional sources and receivers; teleseismic ones remain unchanged)
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_init_N61_61_61.h5 # path to initial model file
|
||||
# model_1d_name: dummy_model_1d_name # 1D model name used in teleseismic 2D solver (iasp91, ak135, user_defined is available), defined in include/1d_model.h
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelize over the sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelize the computational domain)
|
||||
nproc_sub: 1 # number of processors for sweep parallelization (parallel the fast sweep method)
|
||||
use_gpu: false # true if use gpu (EXPERIMENTAL)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_joint_step2 # path to the output directory (default is ./OUTPUT_FILES/)
|
||||
output_source_field: false # True: output the traveltime field and adjoint field of all sources at each iteration. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_kernel: false # True: output sensitivity kernel and kernel density. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_final_model: true # True: output merged final model. This file can be used as the input model for TomoATT. Default: true. File: 'model_final.h5'.
|
||||
output_middle_model: false # True: output merged intermediate models during inversion. This file can be used as the input model for TomoATT. Default: false. File: 'middle_model_step_XXXX.h5'
|
||||
output_in_process: false # True: output at each inv iteration, otherwise, only output step 0, Niter-1, Niter. Default: true. File: 'out_data_sim_group_0'.
|
||||
output_in_process_data: false # True: output src_rec_file at each inv iteration, otherwise, only output step 0, Niter-2, Niter-1. Default: true. File: 'src_rec_file_step_XXXX.dat'
|
||||
single_precision_output: false # True: output results in single precision. Default: false.
|
||||
verbose_output_level: 0 # output internal parameters, (to do).
|
||||
output_file_format: 0 # 0: hdf5, 1: ascii
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 3
|
||||
|
||||
have_tele_data: false # an error is reported if this is false but a source outside the study region is used. Default: false.
|
||||
|
||||
|
||||
###################################################
|
||||
# model update parameters setting #
|
||||
###################################################
|
||||
model_update:
|
||||
max_iterations: 40 # maximum number of inversion iterations
|
||||
optim_method: 0 # optimization method. 0 : grad_descent, 1 : halve-stepping, 2 : lbfgs (EXPERIMENTAL)
|
||||
|
||||
#common parameters for all optim methods
|
||||
step_length: 0.02 # the initial step length of model perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
|
||||
# parameters for optim_method 0 (gradient_descent)
|
||||
optim_method_0:
|
||||
step_method: 1 # the method to modulate step size. 0: according to objective function; 1: according to gradient direction
|
||||
# if step_method: 0, and the objective function increases, step size -> step_length * step_length_decay.
|
||||
step_length_decay: 0.9 # default: 0.9
|
||||
# if step_method: 1, and the angle between the current and previous gradients exceeds step_length_gradient_angle, step size -> step_length * step_length_change[0];
|
||||
# otherwise, step size -> step_length * step_length_change[1].
|
||||
step_length_gradient_angle: 120 # default: 120.0
|
||||
step_length_change: [0.5, 1.2] # default: [0.5,1.2]
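As a rough illustration (plain Python/NumPy, not part of this file; grad_prev and grad_curr are assumed to be flattened gradient arrays), the step_method: 1 rule above could look like:

import numpy as np

def update_step_length(step_length, grad_prev, grad_curr,
                       angle_thresh_deg=120.0, change=(0.5, 1.2)):
    """Modulate the step length from the angle between successive gradients,
    as described in the comments above (step_method: 1)."""
    cos_a = np.dot(grad_prev, grad_curr) / (
        np.linalg.norm(grad_prev) * np.linalg.norm(grad_curr))
    angle = np.degrees(np.arccos(np.clip(cos_a, -1.0, 1.0)))
    factor = change[0] if angle > angle_thresh_deg else change[1]
    return step_length * factor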
|
||||
# Kdensity_coe is used to rescale the final kernel: kernel -> kernel / pow(kernel density, Kdensity_coe). If Kdensity_coe > 0, regions with less data coverage are enhanced during the inversion.
|
||||
# e.g., if Kdensity_coe = 0, the kernel remains unchanged; if Kdensity_coe = 1, the kernel is fully normalized. 0.5 or less is recommended if rescaling is really required.
|
||||
Kdensity_coe: 0 # default: 0.0, limited range: 0.0 - 0.95
|
||||
|
||||
# parameters for optim_method 1 (halve-stepping) or 2 (lbfgs)
|
||||
optim_method_1_2:
|
||||
max_sub_iterations: 20 # maximum number of each sub-iteration
|
||||
regularization_weight: 0.5 # weight value for regularization (lbfgs mode only)
|
||||
coefs_regulalization_rtp: [1, 1, 1] # regularization coefficients for rtp (lbfgs mode only)
|
||||
|
||||
# smoothing
|
||||
smoothing:
|
||||
smooth_method: 0 # 0: multiparametrization, 1: laplacian smoothing (EXPERIMENTAL)
|
||||
l_smooth_rtp: [1, 1, 1] # smoothing coefficients for laplacian smoothing
|
||||
|
||||
# parameters for smooth method 0 (multigrid model parametrization)
|
||||
# inversion grid can be viewed in OUTPUT_FILES/inversion_grid.txt
|
||||
n_inversion_grid: 5 # number of inversion grid sets
|
||||
|
||||
uniform_inv_grid_dep: false # true if use uniform inversion grid for dep, false if use flexible inversion grid
|
||||
uniform_inv_grid_lat: true # true if use uniform inversion grid for lat, false if use flexible inversion grid
|
||||
uniform_inv_grid_lon: true # true if use uniform inversion grid for lon, false if use flexible inversion grid
|
||||
|
||||
# -------------- uniform inversion grid setting --------------
|
||||
# settings for uniform inversion grid
|
||||
n_inv_dep_lat_lon: [12, 9, 9] # number of the base inversion grid points
|
||||
min_max_dep_inv: [-10, 50] # depth in km (Radius of the earth is defined in config.h/R_earth)
|
||||
min_max_lat_inv: [0, 2] # latitude in degree
|
||||
min_max_lon_inv: [0, 2] # longitude in degree
|
||||
|
||||
# -------------- flexible inversion grid setting --------------
|
||||
# settings for flexible inversion grid
|
||||
dep_inv: [-10, 0, 10, 20, 30, 40, 50, 60] # inversion grid for vel in depth (km)
|
||||
lat_inv: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for vel in latitude (degree)
|
||||
lon_inv: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for vel in longitude (degree)
|
||||
trapezoid: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
# if we want to use another inversion grid for inverting anisotropy, set invgrid_ani: true (default: false)
|
||||
invgrid_ani: false
|
||||
|
||||
# ---------- uniform inversion grid setting for anisotropy ----------
|
||||
n_inv_dep_lat_lon_ani: [12, 11, 11] # number of the base inversion grid points
|
||||
min_max_dep_inv_ani: [-7, 63] # depth in km (Radius of the earth is defined in config.h/R_earth)
|
||||
min_max_lat_inv_ani: [30, 32] # latitude in degree
|
||||
min_max_lon_inv_ani: [30, 32] # longitude in degree
|
||||
|
||||
# ---------- flexible inversion grid setting for anisotropy ----------
|
||||
# settings for flexible inversion grid for anisotropy
|
||||
dep_inv_ani: [-7, -3, 0, 3, 7, 12, 18, 25, 33, 42, 52, 63] # inversion grid for ani in depth (km)
|
||||
lat_inv_ani: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for ani in latitude (degree)
|
||||
lon_inv_ani: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for ani in longitude (degree)
|
||||
trapezoid_ani: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
# Change trapezoid and trapezoid_ani carefully. A trapezoid inversion grid increases the grid spacing with depth to account for the poorer data coverage at greater depths.
|
||||
# The trapezoid inversion grid node with index (i,j,k) in longitude, latitude, and depth is defined as:
|
||||
# if dep_inv[k] < trapezoid[1], lon = lon_inv[i];
|
||||
# lat = lat_inv[j];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[1] <= dep_inv[k] < trapezoid[2], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[2] <= dep_inv[k], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# The shape of the trapezoid inversion grid (x) looks like:
|
||||
#
|
||||
# lon_inv[0] [1] [2] [3] [4]
|
||||
# |<-------- (lon_inv[end] - lon_inv[0]) ---->|
|
||||
# dep_inv[0] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[1] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[2] = trapezoid[1] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[3] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[4] = trapezoid[2] / x x x x x \
|
||||
# | |
|
||||
# dep_inv[5] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[6] | x x x x x |
|
||||
# |<---- trapezoid[0]* (lon_inv[end] - lon_inv[0]) ------>|
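A minimal sketch (plain Python, not part of this file) of the node mapping described above; it assumes mid_lon_inv and mid_lat_inv are the midpoints of the lon/lat inversion-grid ranges, which is not stated explicitly here:

def trapezoid_grid_node(i, j, k, lon_inv, lat_inv, dep_inv, trapezoid):
    """Location of the trapezoid inversion-grid node (i, j, k), following the
    piecewise definition in the comments above."""
    scale, d1, d2 = trapezoid                    # e.g. [1.0, 0.0, 50.0]
    mid_lon = 0.5 * (lon_inv[0] + lon_inv[-1])   # assumed definition of mid_lon_inv
    mid_lat = 0.5 * (lat_inv[0] + lat_inv[-1])   # assumed definition of mid_lat_inv
    dep = dep_inv[k]
    if dep < d1:                                 # shallow part: unscaled grid
        lon, lat = lon_inv[i], lat_inv[j]
    elif dep < d2:                               # transition: scaling grows with depth
        f = (dep - d1) / (d2 - d1) * scale
        lon = mid_lon + (lon_inv[i] - mid_lon) * f
        lat = mid_lat + (lat_inv[j] - mid_lat) * f
    else:                                        # deep part: constant scaling
        lon = mid_lon + (lon_inv[i] - mid_lon) * scale
        lat = mid_lat + (lat_inv[j] - mid_lat) * scale
    return lon, lat, dep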
|
||||
|
||||
# inversion grid volume rescale (kernel -> kernel / volume of inversion grid mesh),
|
||||
# this preconditioning should be applied carefully if the sizes of the inversion grids are unbalanced
|
||||
invgrid_volume_rescale: false
|
||||
|
||||
# path to station correction file (under development)
|
||||
use_sta_correction: false
|
||||
# initial_sta_correction_file: dummy_sta_correction_file # the path of initial station correction
|
||||
step_length_sta_correction: 0.001 # step length related to the update of the station correction terms
|
||||
|
||||
|
||||
# In the following data subsections, XXX_weight assigns a weight to each datum, influencing the objective function and gradient.
|
||||
# XXX_weight : [d1,d2,w1,w2] means:
|
||||
# if XXX < d1, weight = w1
|
||||
# if d1 <= XXX < d2, weight = w1 + (XXX-d1)/(d2-d1)*(w2-w1), (linear interpolation)
|
||||
# if d2 <= XXX , weight = w2
|
||||
# You can easily set w1 = w2 = 1.0 to normalize the weight related to XXX.
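A minimal sketch (plain Python, not part of this file) of how a [d1, d2, w1, w2] weight entry described above is interpreted:

def data_weight(x, d1, d2, w1, w2):
    """Piecewise-linear weight: w1 below d1, w2 above d2,
    linear interpolation in between (as described in the comments above)."""
    if x < d1:
        return w1
    if x < d2:
        return w1 + (x - d1) / (d2 - d1) * (w2 - w1)
    return w2

# e.g. residual_weight: [1, 3, 1, 1] keeps the weight at 1 for all residuals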
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time: true # 'true' for using absolute traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the absolute traveltime residual (second) = abs(t^{obs}_{n,i} - t^{syn}_{n,j})
|
||||
distance_weight: [100, 200, 1, 1] # XXX is epicenter distance (km) between the source and receiver related to the data
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time: true # 'true' for using common source differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the common source differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{n,j} - t^{syn}_{n,i} + t^{syn}_{n,j}).
|
||||
azimuthal_weight: [15, 30, 1, 1] # XXX is the azimuth difference between two separate stations related to the common source.
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time: false # 'true' for using common receiver differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the common receiver differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{m,i} - t^{syn}_{n,i} + t^{syn}_{m,i})
|
||||
azimuthal_weight: [15, 30, 1, 1] # XXX is the azimuth difference between two separate sources related to the common receiver.
|
||||
|
||||
# -------------- global weight of different types of data (to balance the weight of different data) --------------
|
||||
global_weight:
|
||||
balance_data_weight: true # true: normalize by the total weight of each data type. false: use the original weights (the per-data-type weights below must then be set)
|
||||
abs_time_weight: 1 # weight of absolute traveltime data after balance, default: 1.0
|
||||
cs_dif_time_local_weight: 1 # weight of common source differential traveltime data after balance, default: 1.0
|
||||
cr_dif_time_local_weight: 1 # weight of common receiver differential traveltime data after balance, default: 1.0
|
||||
teleseismic_weight: 1 # weight of teleseismic data after balancing, default: 1.0 (excluded in this version)
|
||||
|
||||
# -------------- inversion parameters --------------
|
||||
update_slowness : true # update slowness (velocity) or not. default: true
|
||||
update_azi_ani : true # update azimuthal anisotropy (xi, eta) or not. default: false
|
||||
|
||||
# -------------- for teleseismic inversion (under development) --------------
|
||||
# depth_taper : [d1,d2] means:
|
||||
# if XXX < d1, kernel <- kernel * 0.0
|
||||
# if d1 <= XXX < d2, kernel <- kernel * (XXX-d1)/(d2-d1), (linear interpolation)
|
||||
# if d2 <= XXX , kernel <- kernel * 1.0
|
||||
# You can set d1 = -200, d2 = -100 to effectively remove this taper.
|
||||
depth_taper : [-1e+07, -1e+07]
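A minimal sketch (plain Python, not part of this file) of the taper factor described above:

def depth_taper_factor(dep, d1, d2):
    """Kernel taper factor: 0 above d1, linear ramp between d1 and d2,
    1 below d2 (as described in the comments above)."""
    if dep < d1:
        return 0.0
    if dep < d2:
        return (dep - d1) / (d2 - d1)
    return 1.0

# depth_taper: [-1e+07, -1e+07] makes the factor 1 everywhere, i.e. the taper is disabled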
|
||||
|
||||
#################################################
|
||||
# relocation parameters setting #
|
||||
#################################################
|
||||
relocation: # update earthquake hypocenter and origin time (when run_mode : 2 and 3)
|
||||
min_Ndata: 4 # if the number of data for an earthquake is less than <min_Ndata>, the earthquake will not be relocated. default value: 4
|
||||
|
||||
# relocation_strategy
|
||||
step_length : 0.01 # initial step length of relocation perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
step_length_decay : 0.9 # if the objective function increases, step size -> step_length * step_length_decay. default: 0.9
|
||||
rescaling_dep_lat_lon_ortime : [10, 15, 15, 1] # The perturbation is related to <rescaling_dep_lat_lon_ortime>. Unit: km,km,km,second
|
||||
max_change_dep_lat_lon_ortime : [10, 15, 15, 1] # the change of dep,lat,lon,ortime do not exceed max_change. Unit: km,km,km,second
|
||||
max_iterations : 201 # maximum number of iterations for relocation
|
||||
tol_gradient : 0.0001 # if the norm of gradient is smaller than the tolerance, the iteration of relocation terminates
|
||||
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time : true # 'yes' for using absolute traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
residual_weight : [1, 3, 1, 1] # XXX is the absolute traveltime residual (second) = abs(t^{obs}_{n,i} - t^{syn}_{n,j})
|
||||
distance_weight : [1, 3, 1, 1] # XXX is epicenter distance (km) between the source and receiver related to the data
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time : false # 'yes' for using common source differential traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
residual_weight : [1, 3, 1, 1] # XXX is the common source differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{n,j} - t^{syn}_{n,i} + t^{syn}_{n,j}).
|
||||
azimuthal_weight : [100, 200, 1, 1] # XXX is the azimuth difference between two separate stations related to the common source.
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time : true # 'yes' for using common receiver differential traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
residual_weight : [15, 30, 1, 1] # XXX is the common receiver differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{m,i} - t^{syn}_{n,i} + t^{syn}_{m,i})
|
||||
azimuthal_weight : [15, 30, 1, 1] # XXX is the azimuth difference between two separate sources related to the common receiver.
|
||||
|
||||
|
||||
# -------------- global weight of different types of data (to balance the weight of different data) --------------
|
||||
global_weight:
|
||||
balance_data_weight: true # true: normalize by the total weight of each data type. false: use the original weights (the per-data-type weights below must then be set)
|
||||
abs_time_local_weight: 1 # weight of absolute traveltime data for relocation after balance, default: 1.0
|
||||
cs_dif_time_local_weight: 1 # weight of common source differential traveltime data for relocation after balance, default: 1.0
|
||||
cr_dif_time_local_weight: 1 # weight of common receiver differential traveltime data for relocation after balance, default: 1.0
|
||||
|
||||
####################################################################
|
||||
# inversion strategy for tomography and relocation #
|
||||
####################################################################
|
||||
inversion_strategy: # update model parameters and earthquake hypocenter iteratively (when run_mode : 3)
|
||||
|
||||
inv_mode : 1 # 0: update model parameters and relocate iteratively. 1: update model parameters and relocate simultaneously.
|
||||
|
||||
# for inv_mode : 0, parameters below are required
|
||||
inv_mode_0: # update model for <model_update_N_iter> steps, then update location for <relocation_N_iter> steps, and repeat the process for <max_loop> loops.
|
||||
model_update_N_iter : 1
|
||||
relocation_N_iter : 1
|
||||
max_loop : 10
|
||||
|
||||
# for inv_mode : 1, parameters below are required
|
||||
inv_mode_1: # update model and location simultaneously for <max_loop> loops.
|
||||
max_loop : 40
|
||||
@@ -0,0 +1,139 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: OUTPUT_FILES/OUTPUT_FILES_joint_step2/src_rec_file_inv_0039_reloc_0039_obs.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver (only applied to regional sources and receivers; teleseismic ones remain unchanged)
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: OUTPUT_FILES/OUTPUT_FILES_joint_step2/final_model.h5 # path to initial model file
|
||||
# model_1d_name: dummy_model_1d_name # 1D model name used in teleseismic 2D solver (iasp91, ak135, user_defined is available), defined in include/1d_model.h
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelize over the sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelize the computational domain)
|
||||
nproc_sub: 1 # number of processors for sweep parallelization (parallel the fast sweep method)
|
||||
use_gpu: false # true if use gpu (EXPERIMENTAL)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_joint_step3 # path to the output directory (default is ./OUTPUT_FILES/)
|
||||
output_source_field: false # True: output the traveltime field and adjoint field of all sources at each iteration. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_kernel: false # True: output sensitivity kernel and kernel density. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_final_model: true # True: output merged final model. This file can be used as the input model for TomoATT. Default: true. File: 'model_final.h5'.
|
||||
output_middle_model: false # True: output merged intermediate models during inversion. This file can be used as the input model for TomoATT. Default: false. File: 'middle_model_step_XXXX.h5'
|
||||
output_in_process: false # True: output at each inv iteration, otherwise, only output step 0, Niter-1, Niter. Default: true. File: 'out_data_sim_group_0'.
|
||||
output_in_process_data: false # True: output src_rec_file at each inv iteration, otherwise, only output step 0, Niter-2, Niter-1. Default: true. File: 'src_rec_file_step_XXXX.dat'
|
||||
single_precision_output: false # True: output results in single precision. Default: false.
|
||||
verbose_output_level: 0 # output internal parameters, (to do).
|
||||
output_file_format: 0 # 0: hdf5, 1: ascii
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 2
|
||||
|
||||
have_tele_data: false # an error is reported if this is false but a source outside the study region is used. Default: false.
|
||||
|
||||
|
||||
#################################################
|
||||
# relocation parameters setting #
|
||||
#################################################
|
||||
relocation: # update earthquake hypocenter and origin time (when run_mode : 2 and 3)
|
||||
min_Ndata: 4 # if the number of data for an earthquake is less than <min_Ndata>, the earthquake will not be relocated. default value: 4
|
||||
|
||||
# relocation_strategy
|
||||
step_length : 0.01 # initial step length of relocation perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
step_length_decay : 0.9 # if the objective function increases, step size -> step_length * step_length_decay. default: 0.9
|
||||
rescaling_dep_lat_lon_ortime : [10, 15, 15, 1] # The perturbation is related to <rescaling_dep_lat_lon_ortime>. Unit: km,km,km,second
|
||||
max_change_dep_lat_lon_ortime : [10, 15, 15, 1] # the change of dep,lat,lon,ortime do not exceed max_change. Unit: km,km,km,second
|
||||
max_iterations : 100 # maximum number of iterations for relocation
|
||||
tol_gradient : 0.0001 # if the norm of gradient is smaller than the tolerance, the iteration of relocation terminates
|
||||
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time : false # 'yes' for using absolute traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
residual_weight : [1, 3, 1, 1] # XXX is the absolute traveltime residual (second) = abs(t^{obs}_{n,i} - t^{syn}_{n,j})
|
||||
distance_weight : [1, 3, 1, 1] # XXX is epicenter distance (km) between the source and receiver related to the data
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time : false # 'yes' for using common source differential traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
residual_weight : [1, 3, 1, 1] # XXX is the common source differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{n,j} - t^{syn}_{n,i} + t^{syn}_{n,j}).
|
||||
azimuthal_weight : [100, 200, 1, 1] # XXX is the azimuth difference between two separate stations related to the common source.
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time : true # 'yes' for using common receiver differential traveltime data to update ortime and location; 'no' for not using (no need to set parameters in this section)
|
||||
residual_weight : [15, 30, 1, 1] # XXX is the common receiver differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{m,i} - t^{syn}_{n,i} + t^{syn}_{m,i})
|
||||
azimuthal_weight : [15, 30, 1, 1] # XXX is the azimuth difference between two separate sources related to the common receiver.
|
||||
|
||||
|
||||
# -------------- global weight of different types of data (to balance the weight of different data) --------------
|
||||
global_weight:
|
||||
balance_data_weight: true # true: normalize by the total weight of each data type. false: use the original weights (the per-data-type weights below must then be set)
|
||||
abs_time_local_weight: 1 # weight of absolute traveltime data for relocation after balance, default: 1.0
|
||||
cs_dif_time_local_weight: 1 # weight of common source differential traveltime data for relocation after balance, default: 1.0
|
||||
cr_dif_time_local_weight: 1 # weight of common receiver differential traveltime data for relocation after balance, default: 1.0
|
||||
@@ -0,0 +1,50 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: 1_src_rec_files/src_rec_config.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_ckb_N61_61_61.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelize over the sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelize the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_signal # path to the output directory (default is ./OUTPUT_FILES/)
|
||||
output_final_model: true # output merged final model (final_model.h5) or not.
|
||||
output_in_process: false # output model at each inv iteration or not.
|
||||
output_in_process_data: false # output src_rec_file at each inv iteration or not.
|
||||
output_file_format: 0
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
run_mode: 0
|
||||
29
examples/eg3_joint_inversion/README.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# joint inversion
|
||||
|
||||
This is a toy model for simultaneously updating model parameters and locating earthquakes (Figure 8e of reference [1]).
|
||||
|
||||
Reference:
|
||||
[1] J. Chen, M. Nagaso, M. Xu, and P. Tong, TomoATT: An open-source package for Eikonal equation-based adjoint-state traveltime tomography for seismic velocity and azimuthal anisotropy, submitted.
|
||||
https://doi.org/10.48550/arXiv.2412.00031
|
||||
|
||||
The following Python modules are required to prepare the inversion and to plot the final results:
|
||||
- h5py
|
||||
- PyTomoATT
|
||||
- PyGMT
|
||||
- gmt
|
||||
|
||||
Run this example:
|
||||
|
||||
1. Run the bash script `bash run_this_example.sh` to execute the test.
|
||||
|
||||
2. After inversion, run `plot_output.py` to plot the results.
|
||||
|
||||
The initial and true models:
|
||||
|
||||

|
||||
|
||||
The inversion results:
|
||||
|
||||

|
||||
|
||||
|
||||
34
examples/eg3_joint_inversion/assign_gaussian_noise.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
|
||||
class AssignNoise:
|
||||
def __init__(self, in_fname, out_fname):
|
||||
self.in_fname = in_fname
|
||||
self.out_fname = out_fname
|
||||
self.sr = SrcRec.read(self.in_fname)
|
||||
|
||||
def assign_noise_for_tt(self, noise_level=0.1):
|
||||
self.sr.add_noise(noise_level)
|
||||
|
||||
def assign_noise_for_src(self, lat_pert=0.1, lon_pert=0.1, dep_pert=10, tau_pert=0.5):
|
||||
self.sr.add_noise_to_source(lat_pert, lon_pert, dep_pert, tau_pert)
|
||||
|
||||
if __name__ == "__main__":
|
||||
in_fname = "OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward.dat" # input source receiver file
|
||||
out_fname = "OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward_errloc.dat" # output source receiver file
|
||||
sigma = 0.1 # noise level in seconds
|
||||
lat_pert = 0.1 # assign noise for latitude in degrees
|
||||
lon_pert = 0.1 # assign noise for longitude in degrees
|
||||
dep_pert = 10 # assign noise for depth in km
|
||||
tau_pert = 0.5 # assign noise for origin time in seconds
|
||||
|
||||
# Initialize the instance
|
||||
an = AssignNoise(in_fname, out_fname)
|
||||
|
||||
# Assign noise for travel time
|
||||
an.assign_noise_for_tt(sigma)
|
||||
|
||||
# Assign noise for source
|
||||
an.assign_noise_for_src(lat_pert, lon_pert, dep_pert, tau_pert)
|
||||
|
||||
# Write the output file
|
||||
an.sr.write(out_fname)
|
||||
286
examples/eg3_joint_inversion/plot_output.py
Normal file
@@ -0,0 +1,286 @@
|
||||
# %%
|
||||
import pygmt
|
||||
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")
|
||||
|
||||
import os
|
||||
|
||||
# %%
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.data import ATTData
|
||||
import numpy as np
|
||||
|
||||
# %%
|
||||
# read models
|
||||
|
||||
Ngrid = [61,61,61]
|
||||
data_file = '2_models/model_init_N%d_%d_%d.h5'%(Ngrid[0],Ngrid[1],Ngrid[2])
|
||||
par_file = '3_input_params/input_params_signal.yaml'
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
initial_model = model.to_xarray()
|
||||
|
||||
data_file = '2_models/model_ckb_N%d_%d_%d.h5'%(Ngrid[0],Ngrid[1],Ngrid[2])
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
ckb_model = model.to_xarray()
|
||||
|
||||
# initial model
|
||||
depth = 10.0
|
||||
vel_init = initial_model.interp_dep(depth, field='vel')
|
||||
start = [1.25,0]; end = [1.25,2]
|
||||
vel_init_sec = initial_model.interp_sec(start, end, field='vel', val = 1)
|
||||
|
||||
# checkerboard model
|
||||
vel_ckb = ckb_model.interp_dep(depth, field='vel') # lon = [:,0], lat = [:,1], vel = [:,2]
|
||||
vel_ckb_sec = ckb_model.interp_sec(start, end, field='vel', val = 1)
|
||||
|
||||
# anisotropic arrow
|
||||
samp_interval = 3
|
||||
length = 7
|
||||
width = 0.1
|
||||
ani_thd = 0.02
|
||||
|
||||
ani_ckb_phi = ckb_model.interp_dep(depth, field='phi', samp_interval=samp_interval)
|
||||
ani_ckb_epsilon = ckb_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval)
|
||||
ani_ckb = np.hstack([ani_ckb_phi, ani_ckb_epsilon[:,2].reshape(-1, 1)*length, np.ones((ani_ckb_epsilon.shape[0],1))*width]) # lon, lat, angle, length, width
|
||||
idx = np.where(ani_ckb_epsilon[:,2] > ani_thd)
|
||||
ani_ckb = ani_ckb[idx[0],:]
|
||||
|
||||
os.makedirs('img', exist_ok=True)
|
||||
|
||||
# %%
|
||||
# read src_rec_file for data
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
|
||||
sr = SrcRec.read('1_src_rec_files/src_rec_config.dat')
|
||||
station = sr.receivers[['stlo','stla','stel']].values.T
|
||||
true_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
earthquake = true_loc
|
||||
|
||||
sr = SrcRec.read('OUTPUT_FILES/OUTPUT_FILES_signal/src_rec_file_forward_errloc.dat')
|
||||
init_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
|
||||
# %%
|
||||
# categorize earthquakes
|
||||
ev_idx1 = []
|
||||
ev_idx2 = []
|
||||
ev_idx3 = []
|
||||
for i in range(earthquake.shape[1]):
|
||||
dep = earthquake[2,i]
|
||||
if dep < 15:
|
||||
ev_idx1.append(i)
|
||||
elif dep < 25:
|
||||
ev_idx2.append(i)
|
||||
elif dep < 35:
|
||||
ev_idx3.append(i)
|
||||
|
||||
# %%
|
||||
# plot the model setting
|
||||
fig = pygmt.Figure()
|
||||
|
||||
region = [0,2,0,2]
|
||||
frame = ["xa1","ya1"]
|
||||
projection = "M10c"
|
||||
spacing = 0.04
|
||||
|
||||
vel_range = 20
|
||||
|
||||
# -------------- initial model and earthquake location --------------
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tInitial model and locations"], projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
x = vel_init[:,0]; y = vel_init[:,1]; value = (vel_init[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# earthquakes
|
||||
fig.plot(x = init_loc[0,ev_idx1], y = init_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = init_loc[0,ev_idx2], y = init_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = init_loc[0,ev_idx3], y = init_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
fig.plot(x = station[0,:], y = station[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# # anisotropic arrow
|
||||
# fig.plot(ani_ckb, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
x = vel_init_sec[:,3]; y = vel_init_sec[:,1]; value = (vel_init_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# earthquakes
|
||||
fig.plot(x = init_loc[2,ev_idx1], y = init_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = init_loc[2,ev_idx2], y = init_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = init_loc[2,ev_idx3], y = init_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
|
||||
fig.shift_origin(xshift=4)
|
||||
|
||||
|
||||
# -------------- true model and earthquake location --------------
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tTrue model and locations"], projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
x = vel_ckb[:,0]; y = vel_ckb[:,1]; value = (vel_ckb[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# earthquakes
|
||||
fig.plot(x = true_loc[0,ev_idx1], y = true_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = true_loc[0,ev_idx2], y = true_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = true_loc[0,ev_idx3], y = true_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
# fig.plot(x = loc_st[0,:], y = loc_st[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# anisotropic arrow
|
||||
fig.plot(ani_ckb, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
x = vel_ckb_sec[:,3]; y = vel_ckb_sec[:,1]; value = (vel_ckb_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# earthquakes
|
||||
fig.plot(x = true_loc[2,ev_idx1], y = true_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = true_loc[2,ev_idx2], y = true_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = true_loc[2,ev_idx3], y = true_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
|
||||
# ------------------- colorbar -------------------
|
||||
fig.shift_origin(xshift=-11, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a%f"%(vel_range),"x+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.shift_origin(xshift=6, yshift=-1)
|
||||
fig.basemap(region=[0,1,0,1], frame=["wesn"], projection="X6c/1.5c")
|
||||
ani = [
|
||||
[0.2, 0.6, 45, 0.02*length, width], # lon, lat, phi, epsilon, size
|
||||
[0.5, 0.6, 45, 0.05*length, width],
|
||||
[0.8, 0.6, 45, 0.10*length, width],
|
||||
]
|
||||
fig.plot(ani, style='j', fill='yellow1', pen='0.5p,black')
|
||||
fig.text(text=["0.02", "0.05", "0.10"], x=[0.2,0.5,0.8], y=[0.2]*3, font="16p,Helvetica", justify="CM")
|
||||
fig.shift_origin(xshift= 11, yshift=2.5)
|
||||
|
||||
fig.show()
|
||||
fig.savefig('img/model_setting.png', dpi=300)
|
||||
|
||||
# %%
|
||||
# plot the joint inversion results
|
||||
|
||||
fig = pygmt.Figure()
|
||||
|
||||
region = [0,2,0,2]
|
||||
projection = "M10c"
|
||||
spacing = 0.04
|
||||
|
||||
vel_range = 20
|
||||
|
||||
tag_list = ["joint_step1", "joint_step2", "joint_step3"]
|
||||
|
||||
for itag, tag in enumerate(tag_list):
|
||||
|
||||
if (tag == "joint_step1"):
|
||||
# model
|
||||
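# step 1 relocates events in the unchanged initial model, so the plotted velocity perturbation is zero; only the event locations are updated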
x = vel_init[:,0]; y = vel_init[:,1]; value = (vel_init[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
x_sec = vel_init_sec[:,3]; y_sec = vel_init_sec[:,1]; value_sec = (vel_init_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
|
||||
# location
|
||||
sr = SrcRec.read('OUTPUT_FILES/OUTPUT_FILES_%s/src_rec_file_reloc_0050.dat'%(tag))
|
||||
re_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
|
||||
frame = ["xa1","ya1","+tStep %d, preliminary location"%(itag+1)]
|
||||
elif (tag == "joint_step2"):
|
||||
# model
|
||||
data_file = "OUTPUT_FILES/OUTPUT_FILES_%s/final_model.h5"%(tag)
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
inv_model = model.to_xarray()
|
||||
vel_inv = inv_model.interp_dep(depth, field='vel') # lon = [:,0], lat = [:,1], vel = [:,2]
|
||||
x = vel_inv[:,0]; y = vel_inv[:,1]; value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
vel_inv_sec = inv_model.interp_sec(start, end, field='vel', val = 1)
|
||||
x_sec = vel_inv_sec[:,3]; y_sec = vel_inv_sec[:,1]; value_sec = (vel_inv_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
|
||||
ani_inv_phi = inv_model.interp_dep(depth, field='phi', samp_interval=samp_interval)
|
||||
ani_inv_epsilon = inv_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval)
|
||||
ani_inv = np.hstack([ani_inv_phi, ani_inv_epsilon[:,2].reshape(-1, 1)*length, np.ones((ani_inv_epsilon.shape[0],1))*width]) # lon, lat, angle, length, width
|
||||
idx = np.where(ani_inv_epsilon[:,2] > ani_thd)
|
||||
ani = ani_inv[idx[0],:]
|
||||
|
||||
# location
|
||||
sr = SrcRec.read('OUTPUT_FILES/OUTPUT_FILES_%s/src_rec_file_inv_0039_reloc_0039.dat'%(tag))
|
||||
re_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
|
||||
frame = ["xa1","ya1","+tStep %d, joint inversion"%(itag+1)]
|
||||
elif (tag == "joint_step3"):
|
||||
|
||||
# model
|
||||
x = vel_inv[:,0]; y = vel_inv[:,1]; value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
x_sec = vel_inv_sec[:,3]; y_sec = vel_inv_sec[:,1]; value_sec = (vel_inv_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100
|
||||
ani = ani_inv[idx[0],:]
|
||||
|
||||
# location
|
||||
sr = SrcRec.read('OUTPUT_FILES/OUTPUT_FILES_%s/src_rec_file_reloc_0100.dat'%(tag))
|
||||
re_loc = sr.sources[['evlo','evla','evdp']].values.T
|
||||
|
||||
frame = ["xa1","ya1","+tStep %d, relocation"%(itag+1)]
|
||||
|
||||
# plot the inversion result
|
||||
|
||||
|
||||
# -------------- inversion model --------------
|
||||
fig.basemap(region=region, frame=frame, projection=projection)
|
||||
# velocity perturbation
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-vel_range, vel_range], background=True, reverse=False)
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(grid = grid)
|
||||
# earthquakes
|
||||
fig.plot(x = re_loc[0,ev_idx1], y = re_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = re_loc[0,ev_idx2], y = re_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = re_loc[0,ev_idx3], y = re_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
# stations
|
||||
# fig.plot(x = loc_st[0,:], y = loc_st[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")
|
||||
|
||||
# anisotropic arrow
|
||||
if (tag == "joint_step2" or tag == "joint_step3"):
|
||||
fig.plot(ani, style='j', fill='yellow1', pen='0.5p,black')
|
||||
|
||||
fig.shift_origin(xshift=11)
|
||||
|
||||
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","Nswe"], projection="X2c/10c")
|
||||
grid = pygmt.surface(x=x_sec, y=y_sec, z=value_sec, spacing="1/0.04",region=[0,40,0,2])
|
||||
fig.grdimage(grid = grid)
|
||||
|
||||
# earthquakes
|
||||
fig.plot(x = re_loc[2,ev_idx1], y = re_loc[1,ev_idx1], style = "c0.1c", fill = "red")
|
||||
fig.plot(x = re_loc[2,ev_idx2], y = re_loc[1,ev_idx2], style = "c0.1c", fill = "green")
|
||||
fig.plot(x = re_loc[2,ev_idx3], y = re_loc[1,ev_idx3], style = "c0.1c", fill = "black")
|
||||
|
||||
fig.shift_origin(xshift=4)
|
||||
|
||||
# ------------------- colorbar -------------------
|
||||
fig.shift_origin(xshift=-4)
|
||||
|
||||
fig.shift_origin(xshift=-11, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a%f"%(vel_range),"x+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.shift_origin(xshift=6, yshift=-1)
|
||||
fig.basemap(region=[0,1,0,1], frame=["wesn"], projection="X6c/1.5c")
|
||||
ani = [
|
||||
[0.2, 0.6, 45, 0.02*length, width], # lon, lat, phi, epsilon, size
|
||||
[0.5, 0.6, 45, 0.05*length, width],
|
||||
[0.8, 0.6, 45, 0.10*length, width],
|
||||
]
|
||||
fig.plot(ani, style='j', fill='yellow1', pen='0.5p,black')
|
||||
fig.text(text=["0.02", "0.05", "0.10"], x=[0.2,0.5,0.8], y=[0.2]*3, font="16p,Helvetica", justify="CM")
|
||||
fig.shift_origin(xshift= 11, yshift=2.5)
|
||||
|
||||
|
||||
fig.show()
|
||||
fig.savefig('img/model_joint.png', dpi=300)
|
||||
|
||||
|
||||
63
examples/eg3_joint_inversion/prepare_input_files.py
Normal file
@@ -0,0 +1,63 @@
|
||||
# download the src_rec file from Zenodo
|
||||
import os
|
||||
import numpy as np
|
||||
import sys
|
||||
try:
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.checkerboard import Checker
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
except ImportError:
    print("ERROR: pytomoatt not found. Please install pytomoatt first. "
          "See https://tomoatt.github.io/PyTomoATT/installation.html for details.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
class BuildInitialModel():
|
||||
def __init__(self, par_file="./3_input_params/input_params_signal.yaml", output_dir="2_models"):
|
||||
"""
|
||||
Build initial model for tomography inversion
|
||||
"""
|
||||
self.am = ATTModel(par_file)
|
||||
self.output_dir = output_dir
|
||||
|
||||
def build_initial_model(self, vel_min=5.0, vel_max=8.0):
|
||||
"""
|
||||
Build initial model for tomography inversion
|
||||
"""
|
||||
self.am.vel[self.am.depths < 0, :, :] = vel_min
|
||||
idx = np.where((0 <= self.am.depths) & (self.am.depths < 40.0))[0]
|
||||
self.am.vel[idx, :, :] = np.linspace(vel_min, vel_max, idx.size)[::-1][:, np.newaxis, np.newaxis]
|
||||
self.am.vel[self.am.depths >= 40.0, :, :] = vel_max
|
||||
|
||||
|
||||
def build_ckb_model(output_dir="2_models"):
|
||||
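# build a 2x2x2 checkerboard on top of the initial model: velocity perturbed by roughly +/-20 % (pert_vel=0.2),
# anisotropy amplitude 0.1 with a 60-degree fast direction, confined to 0.5-1.5 degrees in lat/lon and 0-40 km in depth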
cbk = Checker(f'{output_dir}/model_init_N61_61_61.h5', para_fname="./3_input_params/input_params_signal.yaml")
|
||||
cbk.checkerboard(
|
||||
n_pert_x=2, n_pert_y=2, n_pert_z=2,
|
||||
pert_vel=0.2, pert_ani=0.1, ani_dir=60.0,
|
||||
lim_x=[0.5, 1.5], lim_y=[0.5, 1.5], lim_z=[0, 40]
|
||||
)
|
||||
cbk.write(f'{output_dir}/model_ckb_N61_61_61.h5')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# download src_rec_config.dat
|
||||
url = 'https://zenodo.org/records/14053821/files/src_rec_config.dat'
|
||||
path = "1_src_rec_files/src_rec_config.dat"
|
||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||
if not os.path.exists(path):
|
||||
sr = SrcRec.read(url)
|
||||
sr.write(path)
|
||||
|
||||
# build initial model
|
||||
output_dir = "2_models"
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
bim = BuildInitialModel(output_dir=output_dir)
|
||||
bim.build_initial_model()
|
||||
bim.am.write('{}/model_init_N{:d}_{:d}_{:d}.h5'.format(bim.output_dir, *bim.am.n_rtp))
|
||||
|
||||
# build ckb model
|
||||
build_ckb_model(output_dir)
|
||||
|
||||
|
||||
|
||||
40
examples/eg3_joint_inversion/run_this_example.sh
Normal file
@@ -0,0 +1,40 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
# Step 1: Generate necessary input files
|
||||
python prepare_input_files.py
|
||||
|
||||
# Step 2: Run forward modeling
|
||||
# # for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
# # for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_signal.yaml
|
||||
|
||||
# Step 3: Assign data noise and location perturbation to the observational data
|
||||
python assign_gaussian_noise.py
|
||||
|
||||
# Step 4: Do joint inversion
|
||||
# # for WSL
|
||||
# # step 1. relocation for 50 iterations in the initial model, using traveltimes and common-receiver differential arrival times
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_joint_step1.yaml
|
||||
# # step 2. simultaneously update model parameters and locations for 40 iterations,
|
||||
# # using traveltimes and common-source differential arrival times for model update
|
||||
# # using traveltimes and common-receiver differential arrival times for location
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_joint_step2.yaml
|
||||
# # step 3. relocation for 50 iterations in the initial model, using only common-receiver differential arrival times
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_joint_step3.yaml
|
||||
|
||||
# # for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_joint_step1.yaml
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_joint_step2.yaml
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_joint_step3.yaml
|
||||
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_joint_step1.yaml
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_joint_step2.yaml
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_joint_step3.yaml
|
||||
|
||||
# Step 5 (Optional): Plot the results
|
||||
python plot_output.py
|
||||
@@ -0,0 +1,215 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: OUTPUT_FILES/OUTPUT_FILES_1dinv_signal/src_rec_file_step_0000.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver (only valid for regional source and receiver, those of tele remain unchanged)
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_init_N61_61_61.h5 # path to initial model file
|
||||
# model_1d_name: dummy_model_1d_name # 1D model name used in teleseismic 2D solver (iasp91, ak135, user_defined is available), defined in include/1d_model.h
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelization over the sources)
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelization of the computational domain)
nproc_sub: 1 # number of processors for sweep parallelization (parallelization of the fast sweeping method)
|
||||
use_gpu: false # true if use gpu (EXPERIMENTAL)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_1dinv_inv # path to the output directory (default is ./OUTPUT_FILES/)
|
||||
output_source_field: true # True: output the traveltime field and adjoint field of all sources at each iteration. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_kernel: false # True: output sensitivity kernel and kernel density. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_final_model: true # True: output merged final model. This file can be used as the input model for TomoATT. Default: true. File: 'model_final.h5'.
|
||||
output_middle_model: false # True: output merged intermediate models during inversion. This file can be used as the input model for TomoATT. Default: false. File: 'middle_model_step_XXXX.h5'
|
||||
output_in_process: false # True: output at each inv iteration, otherwise, only output step 0, Niter-1, Niter. Default: true. File: 'out_data_sim_group_0'.
|
||||
output_in_process_data: false # True: output src_rec_file at each inv iteration, otherwise, only output step 0, Niter-2, Niter-1. Default: true. File: 'src_rec_file_step_XXXX.dat'
|
||||
single_precision_output: false # True: output results in single precision. Default: false.
|
||||
verbose_output_level: 0 # output internal parameters, (to do).
|
||||
output_file_format: 0 # 0: hdf5, 1: ascii
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 4
|
||||
|
||||
have_tele_data: false # An error will be reported if false but source out of study region is used. Default: false.
|
||||
|
||||
###################################################
|
||||
# model update parameters setting #
|
||||
###################################################
|
||||
model_update:
|
||||
max_iterations: 200 # maximum number of inversion iterations
|
||||
optim_method: 0 # optimization method. 0 : grad_descent, 1 : halve-stepping, 2 : lbfgs (EXPERIMENTAL)
|
||||
|
||||
#common parameters for all optim methods
|
||||
step_length: 0.02 # the initial step length of model perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
|
||||
# parameters for optim_method 0 (gradient_descent)
|
||||
optim_method_0:
|
||||
step_method: 1 # the method to modulate step size. 0: according to objective function; 1: according to gradient direction
|
||||
# if step_method: 0 and the objective function increases, step size -> step length * step_length_decay.
|
||||
step_length_decay: 0.9 # default: 0.9
|
||||
# if step_method:1. if the angle between the current and the previous gradients is greater than step_length_gradient_angle, step size -> step length * step_length_change[0].
|
||||
# otherwise, step size -> step length * step_length_change[1].
|
||||
step_length_gradient_angle: 120 # default: 120.0
|
||||
step_length_change: [0.5, 1.2] # default: [0.5,1.2]
|
||||
# Kdensity_coe is used to rescale the final kernel: kernel -> kernel / pow(density of kernel, Kdensity_coe). if Kdensity_coe > 0, the region with less data will be enhanced during the inversion
|
||||
# e.g., if Kdensity_coe = 0, the kernel remains unchanged; if Kdensity_coe = 1, the kernel is fully normalized. 0.5 or less is recommended if rescaling is really required.
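# (illustration: Kdensity_coe = 0.5 divides the kernel by the square root of the kernel density)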
|
||||
Kdensity_coe: 0 # default: 0.0, limited range: 0.0 - 0.95
|
||||
|
||||
# smoothing
|
||||
smoothing:
|
||||
smooth_method: 0 # 0: multiparametrization, 1: laplacian smoothing (EXPERIMENTAL)
|
||||
l_smooth_rtp: [1, 1, 1] # smoothing coefficients for laplacian smoothing
|
||||
|
||||
# parameters for smooth method 0 (multigrid model parametrization)
|
||||
# inversion grid can be viewed in OUTPUT_FILES/inversion_grid.txt
|
||||
n_inversion_grid: 5 # number of inversion grid sets
|
||||
|
||||
uniform_inv_grid_dep: true # true if use uniform inversion grid for dep, false if use flexible inversion grid
|
||||
uniform_inv_grid_lat: true # true if use uniform inversion grid for lat, false if use flexible inversion grid
|
||||
uniform_inv_grid_lon: true # true if use uniform inversion grid for lon, false if use flexible inversion grid
|
||||
|
||||
# -------------- uniform inversion grid setting --------------
|
||||
# settings for uniform inversion grid
|
||||
n_inv_dep_lat_lon: [13, 9, 9] # number of the base inversion grid points
|
||||
min_max_dep_inv: [-10, 50] # depth in km (Radius of the earth is defined in config.h/R_earth)
|
||||
min_max_lat_inv: [0, 2] # latitude in degree
|
||||
min_max_lon_inv: [0, 2] # longitude in degree
|
||||
|
||||
# -------------- flexible inversion grid setting --------------
|
||||
# settings for flexible inversion grid
|
||||
dep_inv: [-10, 0, 10, 20, 30, 40, 50, 60] # inversion grid for vel in depth (km)
|
||||
lat_inv: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for vel in latitude (degree)
|
||||
lon_inv: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for vel in longitude (degree)
|
||||
trapezoid: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
# Change trapezoid and trapezoid_ani carefully; use a trapezoid inversion grid only if you really want to increase the inversion grid spacing with depth, to account for the poorer data coverage at greater depths.
# The trapezoid inversion grid node with index (i,j,k) in longitude, latitude, and depth is defined as:
|
||||
# if dep_inv[k] < trapezoid[1], lon = lon_inv[i];
|
||||
# lat = lat_inv[j];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[1] <= dep_inv[k] < trapezoid[2], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[i]-mid_lat_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[2] <= dep_inv[k], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[i]-mid_lat_inv)*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# The shape of the trapezoid inversion grid (x) looks like:
|
||||
#
|
||||
# lon_inv[0] [1] [2] [3] [4]
|
||||
# |<-------- (lon_inv[end] - lon_inv[0]) ---->|
|
||||
# dep_inv[0] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[1] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[2] = trapezoid[1] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[3] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[4] = trapezoid[2] / x x x x x \
|
||||
# | |
|
||||
# dep_inv[5] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[6] | x x x x x |
|
||||
# |<---- trapezoid[0]* (lon_inv[end] - lon_inv[0]) ------>|
|
||||
|
||||
|
||||
# In the following data subsection, XXX_weight means a weight is assigned to the data, influencing the objective function and gradient
|
||||
# XXX_weight : [d1,d2,w1,w2] means:
|
||||
# if XXX < d1, weight = w1
|
||||
# if d1 <= XXX < d2, weight = w1 + (XXX-d1)/(d2-d1)*(w2-w1), (linear interpolation)
|
||||
# if d2 <= XXX , weight = w2
|
||||
# You can easily set w1 = w2 = 1.0 to normalize the weight related to XXX.
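# A worked example with illustrative numbers (not the values used in this file):
# with residual_weight: [1, 3, 1, 0.5], a residual of 0.5 s gets weight 1.0,
# a residual of 2 s gets 1 + (2-1)/(3-1)*(0.5-1) = 0.75, and a residual of 5 s gets 0.5.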
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time: true # 'true' for using absolute traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the absolute traveltime residual (second) = abs(t^{obs}_{n,i} - t^{syn}_{n,j})
|
||||
distance_weight: [100, 200, 1, 1] # XXX is epicenter distance (km) between the source and receiver related to the data
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time: false # 'true' for using common source differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the common source differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{n,j} - t^{syn}_{n,i} + t^{syn}_{n,j}).
|
||||
azimuthal_weight: [15, 30, 1, 1] # XXX is the azimuth difference between two separate stations related to the common source.
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time: false # 'true' for using common receiver differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the common receiver differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{m,i} - t^{syn}_{n,i} + t^{syn}_{m,i})
|
||||
azimuthal_weight: [15, 30, 1, 1] # XXX is the azimuth difference between two separate sources related to the common receiver.
|
||||
|
||||
# -------------- global weight of different types of data (to balance the weight of different data) --------------
|
||||
global_weight:
|
||||
balance_data_weight: false # true: normalize by the total weight of each data type; false: use the original weights (the per-type weights below must then be set)
|
||||
abs_time_weight: 1 # weight of absolute traveltime data after balance, default: 1.0
|
||||
cs_dif_time_local_weight: 1 # weight of common source differential traveltime data after balance, default: 1.0
|
||||
cr_dif_time_local_weight: 1 # weight of common receiver differential traveltime data after balance, default: 1.0
|
||||
teleseismic_weight: 1 # weight of teleseismic data after balance, default: 1.0 (excluded in this version)
|
||||
|
||||
# -------------- inversion parameters --------------
|
||||
update_slowness : true # update slowness (velocity) or not. default: true
|
||||
update_azi_ani : false # update azimuthal anisotropy (xi, eta) or not. default: false
|
||||
@@ -0,0 +1,215 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 2] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 61, 61] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: 1_src_rec_files/src_rec_config.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver (only valid for regional source and receiver, those of tele remain unchanged)
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_ckb_N61_61_61.h5 # path to initial model file
|
||||
# model_1d_name: dummy_model_1d_name # 1D model name used in teleseismic 2D solver (iasp91, ak135, user_defined is available), defined in include/1d_model.h
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelization over the sources)
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelization of the computational domain)
nproc_sub: 1 # number of processors for sweep parallelization (parallelization of the fast sweeping method)
|
||||
use_gpu: false # true if use gpu (EXPERIMENTAL)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_1dinv_signal # path to the output directory (default is ./OUTPUT_FILES/)
|
||||
output_source_field: false # True: output the traveltime field and adjoint field of all sources at each iteration. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_kernel: false # True: output sensitivity kernel and kernel density. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_final_model: true # True: output merged final model. This file can be used as the input model for TomoATT. Default: true. File: 'model_final.h5'.
|
||||
output_middle_model: false # True: output merged intermediate models during inversion. This file can be used as the input model for TomoATT. Default: false. File: 'middle_model_step_XXXX.h5'
|
||||
output_in_process: false # True: output at each inv iteration, otherwise, only output step 0, Niter-1, Niter. Default: true. File: 'out_data_sim_group_0'.
|
||||
output_in_process_data: false # True: output src_rec_file at each inv iteration, otherwise, only output step 0, Niter-2, Niter-1. Default: true. File: 'src_rec_file_step_XXXX.dat'
|
||||
single_precision_output: false # True: output results in single precision. Default: false.
|
||||
verbose_output_level: 0 # output internal parameters, (to do).
|
||||
output_file_format: 0 # 0: hdf5, 1: ascii
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 4
|
||||
|
||||
have_tele_data: false # An error will be reported if false but source out of study region is used. Default: false.
|
||||
|
||||
###################################################
|
||||
# model update parameters setting #
|
||||
###################################################
|
||||
model_update:
|
||||
max_iterations: 1 # maximum number of inversion iterations
|
||||
optim_method: 0 # optimization method. 0 : grad_descent, 1 : halve-stepping, 2 : lbfgs (EXPERIMENTAL)
|
||||
|
||||
#common parameters for all optim methods
|
||||
step_length: 0.02 # the initial step length of model perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
|
||||
# parameters for optim_method 0 (gradient_descent)
|
||||
optim_method_0:
|
||||
step_method: 1 # the method to modulate step size. 0: according to objective function; 1: according to gradient direction
|
||||
# if step_method: 0 and the objective function increases, step size -> step length * step_length_decay.
|
||||
step_length_decay: 0.9 # default: 0.9
|
||||
# if step_method:1. if the angle between the current and the previous gradients is greater than step_length_gradient_angle, step size -> step length * step_length_change[0].
|
||||
# otherwise, step size -> step length * step_length_change[1].
|
||||
step_length_gradient_angle: 120 # default: 120.0
|
||||
step_length_change: [0.5, 1.2] # default: [0.5,1.2]
|
||||
# Kdensity_coe is used to rescale the final kernel: kernel -> kernel / pow(density of kernel, Kdensity_coe). if Kdensity_coe > 0, the region with less data will be enhanced during the inversion
|
||||
# e.g., if Kdensity_coe = 0, the kernel remains unchanged; if Kdensity_coe = 1, the kernel is fully normalized. 0.5 or less is recommended if rescaling is really required.
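# (illustration: Kdensity_coe = 0.5 divides the kernel by the square root of the kernel density)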
|
||||
Kdensity_coe: 0 # default: 0.0, limited range: 0.0 - 0.95
|
||||
|
||||
# smoothing
|
||||
smoothing:
|
||||
smooth_method: 0 # 0: multiparametrization, 1: laplacian smoothing (EXPERIMENTAL)
|
||||
l_smooth_rtp: [1, 1, 1] # smoothing coefficients for laplacian smoothing
|
||||
|
||||
# parameters for smooth method 0 (multigrid model parametrization)
|
||||
# inversion grid can be viewed in OUTPUT_FILES/inversion_grid.txt
|
||||
n_inversion_grid: 5 # number of inversion grid sets
|
||||
|
||||
uniform_inv_grid_dep: false # true if use uniform inversion grid for dep, false if use flexible inversion grid
|
||||
uniform_inv_grid_lat: true # true if use uniform inversion grid for lat, false if use flexible inversion grid
|
||||
uniform_inv_grid_lon: true # true if use uniform inversion grid for lon, false if use flexible inversion grid
|
||||
|
||||
# -------------- uniform inversion grid setting --------------
|
||||
# settings for uniform inversion grid
|
||||
n_inv_dep_lat_lon: [12, 9, 9] # number of the base inversion grid points
|
||||
min_max_dep_inv: [-10, 50] # depth in km (Radius of the earth is defined in config.h/R_earth)
|
||||
min_max_lat_inv: [0, 2] # latitude in degree
|
||||
min_max_lon_inv: [0, 2] # longitude in degree
|
||||
|
||||
# -------------- flexible inversion grid setting --------------
|
||||
# settings for flexible inversion grid
|
||||
dep_inv: [-10, 0, 10, 20, 30, 40, 50, 60] # inversion grid for vel in depth (km)
|
||||
lat_inv: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for vel in latitude (degree)
|
||||
lon_inv: [30, 30.2, 30.4, 30.6, 30.8, 31, 31.2, 31.4, 31.6, 31.8, 32] # inversion grid for vel in longitude (degree)
|
||||
trapezoid: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
# Change trapezoid and trapezoid_ani carefully; use a trapezoid inversion grid only if you really want to increase the inversion grid spacing with depth, to account for the poorer data coverage at greater depths.
# The trapezoid inversion grid node with index (i,j,k) in longitude, latitude, and depth is defined as:
|
||||
# if dep_inv[k] < trapezoid[1], lon = lon_inv[i];
|
||||
# lat = lat_inv[j];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[1] <= dep_inv[k] < trapezoid[2], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[i]-mid_lat_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[2] <= dep_inv[k], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[i]-mid_lat_inv)*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# The shape of the trapezoid inversion grid (x) looks like:
|
||||
#
|
||||
# lon_inv[0] [1] [2] [3] [4]
|
||||
# |<-------- (lon_inv[end] - lon_inv[0]) ---->|
|
||||
# dep_inv[0] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[1] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[2] = trapezoid[1] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[3] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[4] = trapezoid[2] / x x x x x \
|
||||
# | |
|
||||
# dep_inv[5] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[6] | x x x x x |
|
||||
# |<---- trapezoid[0]* (lon_inv[end] - lon_inv[0]) ------>|
|
||||
|
||||
|
||||
# In the following data subsection, XXX_weight means a weight is assigned to the data, influencing the objective function and gradient
|
||||
# XXX_weight : [d1,d2,w1,w2] means:
|
||||
# if XXX < d1, weight = w1
|
||||
# if d1 <= XXX < d2, weight = w1 + (XXX-d1)/(d2-d1)*(w2-w1), (linear interpolation)
|
||||
# if d2 <= XXX , weight = w2
|
||||
# You can easily set w1 = w2 = 1.0 to normalize the weight related to XXX.
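# A worked example with illustrative numbers (not the values used in this file):
# with residual_weight: [1, 3, 1, 0.5], a residual of 0.5 s gets weight 1.0,
# a residual of 2 s gets 1 + (2-1)/(3-1)*(0.5-1) = 0.75, and a residual of 5 s gets 0.5.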
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time: true # 'true' for using absolute traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the absolute traveltime residual (second) = abs(t^{obs}_{n,i} - t^{syn}_{n,j})
|
||||
distance_weight: [100, 200, 1, 1] # XXX is epicenter distance (km) between the source and receiver related to the data
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time: false # 'true' for using common source differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the common source differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{n,j} - t^{syn}_{n,i} + t^{syn}_{n,j}).
|
||||
azimuthal_weight: [15, 30, 1, 1] # XXX is the azimuth difference between two separate stations related to the common source.
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time: false # 'true' for using common receiver differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
residual_weight: [1, 3, 1, 1] # XXX is the common receiver differential traveltime residual (second) = abs(t^{obs}_{n,i} - t^{obs}_{m,i} - t^{syn}_{n,i} + t^{syn}_{m,i})
|
||||
azimuthal_weight: [15, 30, 1, 1] # XXX is the azimuth difference between two separate sources related to the common receiver.
|
||||
|
||||
# -------------- global weight of different types of data (to balance the weight of different data) --------------
|
||||
global_weight:
|
||||
balance_data_weight: false # true: normalize by the total weight of each data type; false: use the original weights (the per-type weights below must then be set)
|
||||
abs_time_weight: 1 # weight of absolute traveltime data after balance, default: 1.0
|
||||
cs_dif_time_local_weight: 1 # weight of common source differential traveltime data after balance, default: 1.0
|
||||
cr_dif_time_local_weight: 1 # weight of common receiver differential traveltime data after balance, default: 1.0
|
||||
teleseismic_weight: 1 # weight of teleseismic data after balance, default: 1.0 (excluded in this version)
|
||||
|
||||
# -------------- inversion parameters --------------
|
||||
update_slowness : true # update slowness (velocity) or not. default: true
|
||||
update_azi_ani : false # update azimuthal anisotropy (xi, eta) or not. default: false
|
||||
37
examples/eg4_1d_inversion/plot_output.py
Normal file
@@ -0,0 +1,37 @@
|
||||
import h5py
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
import os
|
||||
|
||||
os.makedirs("img", exist_ok=True)
|
||||
|
||||
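# depth axis for plotting: 61 samples from 50 km down to -10 km (assumes the first index of 'vel' runs from the deepest to the shallowest node)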
dep = np.linspace(50,-10, 61)
|
||||
|
||||
with h5py.File("OUTPUT_FILES/OUTPUT_FILES_1dinv_inv/final_model.h5", "r") as f:
|
||||
vel_final = np.array(f["vel"])
|
||||
with h5py.File("2_models/model_init_N61_61_61.h5", "r") as f:
|
||||
vel_init = np.array(f["vel"])
|
||||
with h5py.File("2_models/model_ckb_N61_61_61.h5", "r") as f:
|
||||
vel_ckb = np.array(f["vel"])
|
||||
|
||||
fig = plt.figure(figsize=(6, 6))
|
||||
ax = fig.add_subplot(111)
|
||||
ax.plot(vel_init[:,0,0] , dep, label="init")
|
||||
ax.plot(vel_ckb[:,0,0], dep, label="ckb")
|
||||
ax.plot(vel_final[:,0,0], dep, label="inv")
|
||||
ax.grid()
|
||||
ax.set_xlabel("Velocity (m/s)",fontsize=16)
|
||||
ax.set_ylabel("Depth (km)",fontsize=16)
|
||||
ax.get_xaxis().set_tick_params(labelsize=16)
|
||||
ax.get_yaxis().set_tick_params(labelsize=16)
|
||||
ax.set_xlim([4.5,8.5])
|
||||
ax.set_ylim([0,50])
|
||||
|
||||
plt.gca().invert_yaxis()
|
||||
plt.legend(fontsize=16)
|
||||
|
||||
plt.show()
|
||||
fig.savefig("img/1d_model_inversion.png", dpi=300, bbox_inches="tight", edgecolor="w", facecolor="w")
|
||||
64
examples/eg4_1d_inversion/prepare_input_files.py
Normal file
@@ -0,0 +1,64 @@
|
||||
# %%
|
||||
# download the src_rec file from Zenodo
|
||||
import os
|
||||
import numpy as np
|
||||
import sys
|
||||
try:
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.checkerboard import Checker
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
except ImportError:
    print("ERROR: pytomoatt not found. Please install pytomoatt first. "
          "See https://tomoatt.github.io/PyTomoATT/installation.html for details.")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
class BuildInitialModel():
|
||||
def __init__(self, par_file="./3_input_params/input_params_signal.yaml", output_dir="2_models"):
|
||||
"""
|
||||
Build initial model for tomography inversion
|
||||
"""
|
||||
self.am = ATTModel(par_file)
|
||||
self.output_dir = output_dir
|
||||
|
||||
def build_initial_model(self, vel_min=5.0, vel_max=8.0):
|
||||
"""
|
||||
Build initial model for tomography inversion
|
||||
"""
|
||||
self.am.vel[self.am.depths < 0, :, :] = vel_min
|
||||
idx = np.where((0 <= self.am.depths) & (self.am.depths < 40.0))[0]
|
||||
self.am.vel[idx, :, :] = np.linspace(vel_min, vel_max, idx.size)[::-1][:, np.newaxis, np.newaxis]
|
||||
self.am.vel[self.am.depths >= 40.0, :, :] = vel_max
|
||||
|
||||
def build_ckb_model(self):
|
||||
"""
|
||||
Build checkerboard model for tomography inversion
|
||||
"""
|
||||
nr = self.am.n_rtp[0]
|
||||
for ir in range(nr):
|
||||
dep = self.am.depths[ir]
|
||||
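# apply a +/-5 % sinusoidal velocity perturbation with a 20 km wavelength in depth to build the 1-D checkerboard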
self.am.vel[ir, :, :] = (1 + 0.05 * np.sin(np.pi * dep / 10.0)) * self.am.vel[ir, :, :]
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# download src_rec_config.dat
|
||||
url = 'https://zenodo.org/records/14053821/files/src_rec_config.dat'
|
||||
path = "1_src_rec_files/src_rec_config.dat"
|
||||
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||
if not os.path.exists(path):
|
||||
sr = SrcRec.read(url)
|
||||
sr.write(path)
|
||||
|
||||
# build initial model
|
||||
output_dir = "2_models"
|
||||
os.makedirs(output_dir, exist_ok=True)
|
||||
bim = BuildInitialModel(output_dir=output_dir)
|
||||
bim.build_initial_model()
|
||||
bim.am.write('{}/model_init_N{:d}_{:d}_{:d}.h5'.format(bim.output_dir, *bim.am.n_rtp))
|
||||
|
||||
bim.build_ckb_model()
|
||||
bim.am.write('{}/model_ckb_N{:d}_{:d}_{:d}.h5'.format(bim.output_dir, *bim.am.n_rtp))
|
||||
|
||||
|
||||
|
||||
27
examples/eg4_1d_inversion/run_this_example.sh
Normal file
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
# Step 1: Generate necessary input files
|
||||
echo "Generating TomoATT input files..."
|
||||
python prepare_input_files.py
|
||||
|
||||
# Step 2: Run forward modeling
|
||||
# # for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_1dinv_signal.yaml
|
||||
# # for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_1dinv_signal.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_1dinv_signal.yaml
|
||||
|
||||
# Step 3: Do inversion
|
||||
# # for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_1dinv_inv.yaml
|
||||
# # for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_1dinv_inv.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_1dinv_inv.yaml
|
||||
|
||||
# Step 4 (Optional): Plot the results
|
||||
echo "Plotting the results..."
|
||||
python plot_output.py
|
||||
|
||||
BIN
examples/realcase1_regional_tomography_California/.DS_Store
vendored
Normal file
Binary file not shown.
@@ -0,0 +1,186 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-5, 45] # depth in km
|
||||
min_max_lat: [-2.0, 2.4] # latitude in degree
|
||||
min_max_lon: [-0.8, 0.8] # longitude in degree
|
||||
n_rtp: [51, 89, 33] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: 1_src_rec_files/src_rec_file.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_init_N51_89_33.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelization over the sources)
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelization of the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_inv # path to the output directory (default is ./OUTPUT_FILES/)
|
||||
output_source_field: false # True: output the traveltime field and adjoint field of all sources at each iteration. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_kernel: true
|
||||
output_final_model: true # True: output merged final model. This file can be used as the input model for TomoATT. Default: true. File: 'model_final.h5'.
|
||||
output_middle_model: true # True: output merged intermediate models during inversion. This file can be used as the input model for TomoATT. Default: false. File: 'middle_model_step_XXXX.h5'
|
||||
output_in_process: true # True: output at each inv iteration, otherwise, only output step 0, Niter-1, Niter. Default: true. File: 'out_data_sim_group_0'.
|
||||
output_in_process_data: true # True: output src_rec_file at each inv iteration, otherwise, only output step 0, Niter-2, Niter-1. Default: true. File: 'src_rec_file_step_XXXX.dat'
|
||||
single_precision_output: false # True: output results in single precision. Default: false.
|
||||
verbose_output_level: 0 # output internal parameters, (to do)
|
||||
output_file_format: 0 # 0: hdf5, 1: ascii
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 1
|
||||
|
||||
###################################################
|
||||
# model update parameters setting #
|
||||
###################################################
|
||||
model_update:
|
||||
max_iterations: 80 # maximum number of inversion iterations
|
||||
|
||||
step_length: 0.01 # the initial step length of model perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
|
||||
# parameters for optim_method 0 (gradient_descent)
|
||||
optim_method_0:
|
||||
# if step_method:1. if the angle between the current and the previous gradients is greater than step_length_gradient_angle, step size -> step length * step_length_change[0].
|
||||
# otherwise, step size -> step length * step_length_change[1].
|
||||
step_length_gradient_angle: 120 # default: 120.0
|
||||
step_length_change: [0.5, 1.41] # default: [0.5,1.2]
|
||||
Kdensity_coe: 0.3 # default: 0.0, range: 0.0 - 1.0
|
||||
|
||||
# parameters for smooth method 0 (multigrid model parametrization)
|
||||
# inversion grid can be viewed in OUTPUT_FILES/inversion_grid.txt
|
||||
n_inversion_grid: 5 # number of inversion grid sets
|
||||
|
||||
uniform_inv_grid_dep: false # true if use uniform inversion grid for dep, false if use flexible inversion grid
|
||||
uniform_inv_grid_lat: false # true if use uniform inversion grid for lat, false if use flexible inversion grid
|
||||
uniform_inv_grid_lon: false # true if use uniform inversion grid for lon, false if use flexible inversion grid
|
||||
|
||||
# settings for uniform inversion grid
|
||||
n_inv_dep_lat_lon: [3, 11, 11] # number of inversion grid in depth, latitude, and longitude direction
|
||||
min_max_dep_inv: [-5 , 5] # inversion grid for vel in depth (km)
|
||||
min_max_lat_inv: [0, 1] # inversion grid for vel in latitude (degree)
|
||||
min_max_lon_inv: [0, 1] # inversion grid for vel in longitude (degree)
|
||||
|
||||
# settings for flexible inversion grid
|
||||
dep_inv: [-5, -2, 0, 3, 7, 12, 17, 23, 30, 38, 47, 57] # inversion grid for vel in depth (km)
|
||||
lat_inv: [-2.5, -2.2, -1.9, -1.6, -1.3, -1.0, -0.7, -0.4, -0.1, 0.2, 0.5, 0.8, 1.1, 1.4, 1.7, 2.0, 2.3, 2.6] # inversion grid for vel in latitude (degree)
|
||||
lon_inv: [-1.2, -0.9, -0.6, -0.3, 0, 0.3, 0.6, 0.9, 1.2] # inversion grid for vel in longitude (degree)
|
||||
trapezoid: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
# to use a separate inversion grid for inverting anisotropy, set invgrid_ani: true (default: false)
|
||||
invgrid_ani: true
|
||||
|
||||
# ---------- flexible inversion grid setting for anisotropy ----------
|
||||
# settings for flexible inversion grid for anisotropy
|
||||
dep_inv_ani: [-5, -2, 0, 3, 7, 12, 17, 23, 30, 38, 47, 57] # inversion grid for ani in depth (km)
|
||||
lat_inv_ani: [-2.8, -2.3, -1.8, -1.3, -0.8, -0.3, 0.2, 0.7, 1.2, 1.7, 2.2, 2.7] # inversion grid for ani in latitude (degree)
|
||||
lon_inv_ani: [-1.2, -0.9, -0.6, -0.3, 0, 0.3, 0.6, 0.9, 1.2] # inversion grid for ani in longitude (degree)
|
||||
trapezoid_ani: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
|
||||
# Change trapezoid and trapezoid_ani carefully; use a trapezoid inversion grid only if you really want to increase the inversion grid spacing with depth, to account for the poorer data coverage at greater depths.
# The trapezoid inversion grid node with index (i,j,k) in longitude, latitude, and depth is defined as:
|
||||
# if dep_inv[k] < trapezoid[1], lon = lon_inv[i];
|
||||
# lat = lat_inv[j];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[1] <= dep_inv[k] < trapezoid[2], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[i]-mid_lat_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[2] <= dep_inv[k], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# The shape of the trapezoid inversion grid (x) looks like:
|
||||
#
|
||||
# lon_inv[0] [1] [2] [3] [4]
|
||||
# |<-------- (lon_inv[end] - lon_inv[0]) ---->|
|
||||
# dep_inv[0] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[1] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[2] = trapezoid[1] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[3] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[4] = trapezoid[2] / x x x x x \
|
||||
# | |
|
||||
# dep_inv[5] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[6] | x x x x x |
|
||||
# |<---- trapezoid[0]* (lon_inv[end] - lon_inv[0]) ------>|
|
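Written out literally in Python, the piecewise definition above reads as follows (a transcription of these comments for readability, not TomoATT's implementation; mid_lon_inv and mid_lat_inv are assumed here to be the midpoints of lon_inv and lat_inv):

```python
def trapezoid_node(i, j, k, lon_inv, lat_inv, dep_inv, trapezoid=(1.0, 0.0, 50.0)):
    # Assumed definitions of the midpoints referred to in the comments above.
    mid_lon = 0.5 * (lon_inv[0] + lon_inv[-1])
    mid_lat = 0.5 * (lat_inv[0] + lat_inv[-1])
    dep = dep_inv[k]
    if dep < trapezoid[1]:                       # shallower than trapezoid[1]: unchanged
        lon, lat = lon_inv[i], lat_inv[j]
    elif dep < trapezoid[2]:                     # between trapezoid[1] and trapezoid[2]
        f = (dep - trapezoid[1]) / (trapezoid[2] - trapezoid[1]) * trapezoid[0]
        lon = mid_lon + (lon_inv[i] - mid_lon) * f
        lat = mid_lat + (lat_inv[j] - mid_lat) * f
    else:                                        # deeper than trapezoid[2]
        lon = mid_lon + (lon_inv[i] - mid_lon) * trapezoid[0]
        lat = mid_lat + (lat_inv[j] - mid_lat) * trapezoid[0]
    return lon, lat, dep
```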
||||
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time: true # 'true' for using absolute traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time: false # 'true' for using common source differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time: false # 'true' for using common receiver differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- inversion parameters --------------
|
||||
update_slowness : true # update slowness (velocity) or not. default: true
|
||||
update_azi_ani : true # update azimuthal anisotropy (xi, eta) or not. default: false
|
||||
30
examples/realcase1_regional_tomography_California/README.md
Normal file
@@ -0,0 +1,30 @@
|
||||
# Real case of regional tomography in central California near Parkfield
|
||||
|
||||
This is a real case to invert traveltimes for velocity heterogeneity and azimuthal anisotropy in central California near Parkfield
|
||||
|
||||
Reference:
|
||||
|
||||
[1] J. Chen, G. Chen, M. Nagaso, and P. Tong, Adjoint-state traveltime tomography for azimuthally anisotropic media in spherical coordinates. Geophys. J. Int., 234 (2023), pp. 712-736.
|
||||
https://doi.org/10.1093/gji/ggad093
|
||||
|
||||
[2] J. Chen, M. Nagaso, M. Xu, and P. Tong, TomoATT: An open-source package for Eikonal equation-based adjoint-state traveltime tomography for seismic velocity and azimuthal anisotropy, submitted.
|
||||
https://doi.org/10.48550/arXiv.2412.00031
|
||||
|
||||
|
||||
Python modules are required to initiate the inversion and to plot final results:
|
||||
- h5py
|
||||
- PyTomoATT
|
||||
- Pygmt
|
||||
- gmt
|
||||
|
||||
Run this example:
|
||||
|
||||
1. Run bash script `bash run_this_example.sh` to execute the test.
|
||||
|
||||
2. After inversion, run `plot_output.py` to plot the results.
|
||||
|
||||
The imaging results:
|
||||
|
||||

|
||||
|
||||
|
||||
222
examples/realcase1_regional_tomography_California/plot_output.py
Normal file
@@ -0,0 +1,222 @@
|
||||
# %%
|
||||
import pygmt
|
||||
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")
|
||||
|
||||
import os
|
||||
|
||||
# %%
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.data import ATTData
|
||||
import numpy as np
|
||||
|
||||
# %%
|
||||
# read model files
|
||||
|
||||
Ngrid = [51,89,33]
|
||||
data_file = '2_models/model_init_N%d_%d_%d.h5'%(Ngrid[0],Ngrid[1],Ngrid[2])
|
||||
par_file = '3_input_params/input_params_real.yaml'
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
initial_model = model.to_xarray()
|
||||
|
||||
data_file = 'OUTPUT_FILES/OUTPUT_FILES_real/final_model.h5'
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
inv_model = model.to_xarray()
|
||||
|
||||
# %%
|
||||
# read earthquakes and stations
|
||||
|
||||
from pytomoatt.src_rec import SrcRec
|
||||
|
||||
# read src_rec_file
|
||||
sr = SrcRec.read("1_src_rec_files/src_rec_file.dat")
|
||||
|
||||
# rotate back to original coordinates
|
||||
central_lat = 35.6
|
||||
central_lon = -120.45
|
||||
rotation_angle = -30
|
||||
sr.rotate(central_lat, central_lon, rotation_angle, reverse=True)
|
||||
|
||||
# get the coordinates of the stations and earthquakes
|
||||
stations = sr.receivers[['stlo','stla','stel']].values.T
|
||||
earthquakes = sr.sources[['evlo','evla','evdp']].values.T
|
||||
|
||||
print(stations.shape)
|
||||
print(earthquakes.shape)
|
||||
|
||||
# %%
|
||||
# study region
|
||||
|
||||
import sys
|
||||
sys.path.append('../utils')
|
||||
import functions_for_data as ffd
|
||||
|
||||
lat1 = -1.8; lat2 = 2.2;
|
||||
lon1 = -0.7; lon2 = 0.7;
|
||||
|
||||
lat_lon_rotate = np.array([[lon1,lat1],[lon1,lat2],[lon2,lat2],[lon2,lat1],[lon1,lat1]])
|
||||
lat_lon = ffd.rtp_rotation_reverse(lat_lon_rotate[:,1],lat_lon_rotate[:,0],central_lat,central_lon,rotation_angle)
|
||||
studt_lat = lat_lon[0]
|
||||
studt_lon = lat_lon[1]
|
||||
|
||||
# %%
|
||||
# load topography
|
||||
region = [-122.8,-118.5,33.5,38]
|
||||
grid_topo = pygmt.datasets.load_earth_relief(resolution="01m", region=region)
|
||||
grid_gra = pygmt.grdgradient(grid = grid_topo, azimuth = 0)
|
||||
|
||||
# %%
|
||||
def line_read(file):
|
||||
doc=open(file,'r')
|
||||
file = doc.readlines()
|
||||
doc.close()
|
||||
lat = []; lon = [];
|
||||
for info in file:
|
||||
tmp = info.split()
|
||||
lon.append(float(tmp[0]))
|
||||
lat.append(float(tmp[1]))
|
||||
return((lat,lon))
|
||||
|
||||
# %%
|
||||
# plot imaging results
|
||||
|
||||
fig = pygmt.Figure()
|
||||
try:
|
||||
os.mkdir("img")
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
# ------------------ Sub fig 1. topography ------------------
|
||||
region = [-122.8,-118.5,33.5,38]
|
||||
frame = ["xa1","ya1","nSWe"]
|
||||
projection = "M10c"
|
||||
|
||||
# topography
|
||||
pygmt.makecpt(cmap="globe", series=[-4000,4000], background = True)
|
||||
fig.grdimage(grid=grid_topo, shading = grid_gra, projection=projection, frame=frame,region=region)
|
||||
# study region
|
||||
fig.plot(x = studt_lon, y = studt_lat, pen = "1.5p,red")
|
||||
# earthquakes
|
||||
fig.plot(x = earthquakes[0,:], y = earthquakes[1,:], style = "c0.02c", fill = "red",label = "Earthquake")
|
||||
# stations
|
||||
fig.plot(x = stations[0,:], y = stations[1,:], style = "t0.2c", fill = "blue", pen = "white", label = "Station")
|
||||
|
||||
|
||||
fig.basemap(region=[0,1,0,1], frame=["wesn+gwhite"], projection="X4c/2c")
|
||||
fig.plot(x=0.1, y=0.3, style='c0.2c', fill='red')
|
||||
fig.text(text="Earthquake", x=0.2, y=0.3, font="16p,Helvetica", justify="LM")
|
||||
fig.plot(x=0.1, y=0.7, style='t0.4c', fill='blue', pen='black')
|
||||
fig.text(text="Station", x=0.2, y=0.7, font="16p,Helvetica", justify="LM")
|
||||
|
||||
|
||||
# ------------------ Sub fig 2. colorbar ------------------
|
||||
fig.shift_origin(xshift= 2, yshift= -2)
|
||||
|
||||
pygmt.makecpt(cmap="globe", series=[-4000,4000], background = True)
|
||||
fig.colorbar(frame = ["a%f"%(4000),"y+lElevation (m)"], position="+e+w4c/0.3c+h")
|
||||
fig.shift_origin(yshift=-2)
|
||||
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-8, 8], background=True, reverse=False)
|
||||
fig.colorbar(frame = ["a%f"%(4),"y+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
fig.shift_origin(yshift=-2)
|
||||
|
||||
pygmt.makecpt(cmap="cool", series=[0, 0.08], background=True, reverse=False)
|
||||
fig.colorbar(frame = ["a%f"%(0.04),"y+lAnisotropy"], position="+ef+w4c/0.3c+h")
|
||||
|
||||
|
||||
|
||||
# ------------------ Sub fig 3. model ------------------
|
||||
fig.shift_origin(xshift = 10, yshift=8)
|
||||
|
||||
region_oblique = [-0.7,0.7,-2.2,1.8]
|
||||
projection = "OA%s/%s/%s/4c"%(central_lon,central_lat,rotation_angle-90.0)
|
||||
perspective = "30/90"
|
||||
spacing = "1m"
|
||||
|
||||
depth_list = [4,8,16]
|
||||
|
||||
for idepth, depth in enumerate(depth_list):
|
||||
|
||||
# initial model
|
||||
vel_init = initial_model.interp_dep(depth, field='vel')
|
||||
|
||||
# output model
|
||||
vel_inv = inv_model.interp_dep(depth, field='vel') # velocity
|
||||
epsilon_inv = inv_model.interp_dep(depth, field='epsilon') # magnitude of anisotropy
|
||||
|
||||
# fast velocity directions
|
||||
samp_interval = 3
|
||||
ani_thd = 0.015
|
||||
length = 20
|
||||
width = 0.1
|
||||
|
||||
ani_inv_phi = inv_model.interp_dep(depth, field='phi', samp_interval=samp_interval)
|
||||
ani_inv_epsilon = inv_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval)
|
||||
ani_inv = np.hstack([ani_inv_phi, ani_inv_epsilon[:,2].reshape(-1, 1)*length, np.ones((ani_inv_epsilon.shape[0],1))*width]) # lon, lat, angle, length, width
|
||||
idx = np.where(ani_inv_epsilon[:,2] > ani_thd)
|
||||
ani = ani_inv[idx[0],:]
|
||||
|
||||
# --------- plot velocity ------------
|
||||
if idepth == 0:
|
||||
frame = ["xa100","ya1","nSwE"]
|
||||
elif idepth == len(depth_list)-1:
|
||||
frame = ["xa100","ya1","NsWe"]
|
||||
else:
|
||||
frame = ["xa100","ya1","nswe"]
|
||||
|
||||
fig.basemap(region=region_oblique, frame=frame, projection=projection, perspective=perspective)
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-8, 8], background=True, reverse=False)
|
||||
|
||||
x = vel_init[:,0]; y = vel_init[:,1]; value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2] * 100
|
||||
y,x = ffd.rtp_rotation_reverse(y,x,central_lat,central_lon,rotation_angle)
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(frame=frame,grid = grid,projection=projection, region=region_oblique,perspective=perspective) # nan_transparent may work
|
||||
|
||||
# tectonic setting
|
||||
fig.coast(region=region_oblique, frame=frame, projection=projection, perspective=perspective, shorelines="1p,black") # coastlines
|
||||
(SAFy,SAFx) = line_read("tectonics/SAF")
|
||||
fig.plot(x = SAFx, y = SAFy, pen = '3.0p,black', perspective = perspective) # SAF
|
||||
if idepth == 0:
|
||||
fig.text(text = "SMB", x = -120.45 , y = 35.0, font = "16p,Helvetica-Bold,black", angle = 150, fill = "lightblue", perspective = perspective) # SMB
|
||||
fig.text(text = "FT", x = -120.6 , y = 36.50, font = "16p,Helvetica-Bold,black", angle = 150, fill = "lightblue", perspective = perspective) # Franciscan terrane
|
||||
fig.text(text = "ST", x = -121.1 , y = 36.0, font = "16p,Helvetica-Bold,black", angle = 150, fill = "lightblue", perspective = perspective) # Salinian terrane
|
||||
fig.text(text = "TR", x = -119.30 , y = 34.70, font = "16p,Helvetica-Bold,black", angle = 150, fill = "lightblue", perspective = perspective) # Coast Ranges
|
||||
|
||||
# depth label
|
||||
fig.text(text="%d km"%(depth), x = -119.8 , y = 34.0, font = "16p,Helvetica-Bold,black", angle = 180, fill = "white", perspective = perspective) # Coast Ranges
|
||||
|
||||
|
||||
# --------- plot anisotropy ------------
|
||||
fig.shift_origin(yshift=-12)
|
||||
|
||||
fig.basemap(region=region_oblique, frame=frame, projection=projection, perspective=perspective)
|
||||
pygmt.makecpt(cmap="cool", series=[0, 0.08], background=True)
|
||||
value = epsilon_inv[:,2]
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(frame=frame,grid = grid,projection=projection, region=region_oblique,perspective=perspective) # nan_transparent may work
|
||||
|
||||
# tectonic setting
|
||||
fig.coast(region=region_oblique, frame=frame, projection=projection, perspective=perspective, shorelines="1p,black") # coastlines
|
||||
(line_y,line_x) = line_read("tectonics/SAF_creeping")
|
||||
fig.plot(x = line_x, y = line_y, pen = '3.0p,black',perspective = perspective)
|
||||
(line_y,line_x) = line_read("tectonics/SAF_transition")
|
||||
fig.plot(x = line_x, y = line_y, pen = '3.0p,red',perspective = perspective)
|
||||
(line_y,line_x) = line_read("tectonics/SAF_locked")
|
||||
fig.plot(x = line_x, y = line_y, pen = '3.0p,blue',perspective = perspective)
|
||||
|
||||
# anisotropy
|
||||
if len(ani) > 0:
|
||||
# rotate back to original coordinates
|
||||
x = ani[:,0]; y = ani[:,1]
|
||||
y,x = ffd.rtp_rotation_reverse(y,x,central_lat,central_lon,rotation_angle)
|
||||
ani[:,0] = x; ani[:,1] = y; # no need to modify the angle, because the projection angle and the rotation angle are the same
|
||||
fig.plot(ani, style='j', fill='yellow1', pen='0.5p,black',perspective=perspective)
|
||||
|
||||
fig.shift_origin(xshift=6,yshift=12)
|
||||
|
||||
|
||||
fig.show()
|
||||
|
||||
fig.savefig("img/imaging_result.png")
|
||||
|
||||
|
||||
@@ -0,0 +1,45 @@
|
||||
# %%
|
||||
# download src_rec_files from Zenodo
|
||||
import os
|
||||
import requests
|
||||
|
||||
url = 'https://zenodo.org/records/14065341/files/src_rec_file.dat?download=1'
|
||||
|
||||
path = "1_src_rec_files/src_rec_file.dat"
|
||||
|
||||
# check file existence
|
||||
if not os.path.exists(path):
|
||||
try:
|
||||
os.mkdir("1_src_rec_files")
|
||||
except:
|
||||
pass
|
||||
print("Downloading src_rec_file.dat from Zenodo...")
|
||||
response = requests.get(url, stream=True)
|
||||
with open(path, 'wb') as out_file:
|
||||
out_file.write(response.content)
|
||||
print("Download complete.")
|
||||
else:
|
||||
print("src_rec_file.dat already exists.")
|
||||
|
||||
# %%
|
||||
# download initial model from Zenodo
|
||||
|
||||
url = 'https://zenodo.org/records/14065341/files/model_init_N51_89_33.h5?download=1'
|
||||
|
||||
path = "2_models/model_init_N51_89_33.h5"
|
||||
|
||||
# check file existence
|
||||
if not os.path.exists(path):
|
||||
try:
|
||||
os.mkdir("2_models")
|
||||
except:
|
||||
pass
|
||||
print("Downloading model_init_N51_89_33.h5 from Zenodo...")
|
||||
response = requests.get(url, stream=True)
|
||||
with open(path, 'wb') as out_file:
|
||||
out_file.write(response.content)
|
||||
print("Download complete.")
|
||||
else:
|
||||
print("model_init_N51_89_33.h5 already exists.")
|
||||
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Step 1: Generate necessary input files
|
||||
python prepare_input_files.py
|
||||
|
||||
# Step 2: Run inversion
|
||||
# # for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_real.yaml
|
||||
# for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_real.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_real.yaml
|
||||
|
||||
# Step 3 (Optional): Plot the results
|
||||
python plot_output.py
|
||||
|
||||
@@ -0,0 +1,68 @@
|
||||
-122.73566 37.96894
|
||||
-122.70941 37.93895
|
||||
-122.65033 37.88999
|
||||
-122.62177 37.83209
|
||||
-122.59009 37.79618
|
||||
-122.56673 37.74661
|
||||
-122.51982 37.71281
|
||||
-122.47924 37.65959
|
||||
-122.40956 37.58436
|
||||
-122.38051 37.54962
|
||||
-122.32748 37.48461
|
||||
-122.23632 37.38344
|
||||
-122.18852 37.34638
|
||||
-122.15621 37.31470
|
||||
-122.11902 37.26828
|
||||
-122.01512 37.19501
|
||||
-121.92424 37.12444
|
||||
-121.88689 37.10485
|
||||
-121.86086 37.08276
|
||||
-121.78342 37.04335
|
||||
-121.70450 36.97942
|
||||
-121.65426 36.93801
|
||||
-121.57487 36.88348
|
||||
-121.47861 36.81956
|
||||
-121.43712 36.78821
|
||||
-121.40534 36.76781
|
||||
-121.36812 36.75009
|
||||
-121.33557 36.71495
|
||||
-121.27255 36.67184
|
||||
-121.16342 36.57248
|
||||
-121.09358 36.51578
|
||||
-121.02139 36.44378
|
||||
-120.93316 36.35578
|
||||
-120.79679 36.21978
|
||||
-120.63636 36.07582
|
||||
-120.52406 35.97185
|
||||
-120.43583 35.90791
|
||||
-120.33957 35.81993
|
||||
-120.29568 35.74979
|
||||
-120.26271 35.71512
|
||||
-120.23380 35.67390
|
||||
-120.19772 35.63300
|
||||
-120.12299 35.56383
|
||||
-120.02674 35.48387
|
||||
-119.98846 35.44344
|
||||
-119.88017 35.32590
|
||||
-119.78610 35.24387
|
||||
-119.67380 35.14792
|
||||
-119.53743 35.04400
|
||||
-119.40726 34.94926
|
||||
-119.34920 34.92597
|
||||
-119.31423 34.90995
|
||||
-119.24588 34.88266
|
||||
-119.20791 34.87552
|
||||
-119.16832 34.87107
|
||||
-119.12954 34.86300
|
||||
-119.09076 34.86210
|
||||
-118.97416 34.84129
|
||||
-118.93565 34.84176
|
||||
-118.85999 34.82469
|
||||
-118.78894 34.79661
|
||||
-118.72217 34.76987
|
||||
-118.68535 34.75631
|
||||
-118.56902 34.72084
|
||||
-118.49005 34.70185
|
||||
-118.39037 34.66204
|
||||
-118.22995 34.61434
|
||||
-118.06150 34.54260
|
||||
@@ -0,0 +1,10 @@
|
||||
-121.36812 36.75009
|
||||
-121.33557 36.71495
|
||||
-121.27255 36.67184
|
||||
-121.16342 36.57248
|
||||
-121.09358 36.51578
|
||||
-121.02139 36.44378
|
||||
-120.93316 36.35578
|
||||
-120.79679 36.21978
|
||||
-120.63636 36.07582
|
||||
-120.52406 35.97185
|
||||
@@ -0,0 +1,10 @@
|
||||
-120.26271 35.71512
|
||||
-120.23380 35.67390
|
||||
-120.19772 35.63300
|
||||
-120.12299 35.56383
|
||||
-120.02674 35.48387
|
||||
-119.98846 35.44344
|
||||
-119.88017 35.32590
|
||||
-119.78610 35.24387
|
||||
-119.67380 35.14792
|
||||
-119.53743 35.04400
|
||||
@@ -0,0 +1,5 @@
|
||||
-120.52406 35.97185
|
||||
-120.43583 35.90791
|
||||
-120.33957 35.81993
|
||||
-120.29568 35.74979
|
||||
-120.26271 35.71512
|
||||
BIN
examples/realcase2_teleseismic_tomography_Thailand/.DS_Store
vendored
Normal file
Binary file not shown.
@@ -0,0 +1,187 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-50, 550] # depth in km
|
||||
min_max_lat: [10.5, 22.5] # latitude in degree
|
||||
min_max_lon: [95.5, 107.5] # longitude in degree
|
||||
n_rtp: [121, 121, 121] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: 1_src_rec_files/src_rec_file.dat # source receiver file path
|
||||
swap_src_rec: false # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_init_N121_121_121.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallel the sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallel the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_real # path to output directory (default is ./OUTPUT_FILES/)
|
||||
output_source_field: false # True: output the traveltime field and adjoint field of all sources at each iteration. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_kernel: false
|
||||
output_final_model: true # True: output merged final model. This file can be used as the input model for TomoATT. Default: true. File: 'final_model.h5'.
|
||||
output_middle_model: false # True: output merged intermediate models during inversion. This file can be used as the input model for TomoATT. Default: false. File: 'middle_model_step_XXXX.h5'
|
||||
output_in_process: false # True: output at each inv iteration, otherwise, only output step 0, Niter-1, Niter. Default: true. File: 'out_data_sim_group_0'.
|
||||
output_in_process_data: false # True: output src_rec_file at each inv iteration, otherwise, only output step 0, Niter-2, Niter-1. Default: true. File: 'src_rec_file_step_XXXX.dat'
|
||||
single_precision_output: false # True: output results in single precision. Default: false.
|
||||
verbose_output_level: 0 # output internal parameters, (to do)
|
||||
output_file_format: 0 # 0: hdf5, 1: ascii
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
|
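As a quick sanity check, the HDF5 outputs listed above can be inspected with h5py; for example, the merged final model (path follows output_dir above):

```python
import h5py

# List the datasets in the merged final model and read the velocity field.
with h5py.File("OUTPUT_FILES/OUTPUT_FILES_real/final_model.h5", "r") as f:
    print(list(f.keys()))      # expected keys: 'eta', 'vel', 'xi'
    vel = f["vel"][:]          # 3-D velocity array on the forward grid
    print(vel.shape, vel.min(), vel.max())
```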
||||
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 1
|
||||
|
||||
have_tele_data: true # If false, an error will be reported when a source outside the study region is used. Default: false.
|
||||
ignore_velocity_discontinuity: true # If false, an error will be reported when the input model contains a velocity discontinuity (v[ix,iy,iz+1] > v[ix,iy,iz] * 1.2 or v[ix,iy,iz+1] < v[ix,iy,iz] * 0.8). Default: false.
|
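For reference, the discontinuity criterion quoted above can be checked on an input model with a short numpy sketch (illustrative only; vel is assumed to be the 3-D velocity array read from the model file, with the comparison taken along the axis indexed by iz in the comment):

```python
import numpy as np

def has_velocity_discontinuity(vel, axis=0):
    # Flag neighbouring nodes along `axis` that jump by more than +/-20 %,
    # i.e. v_next > 1.2 * v or v_next < 0.8 * v, as in the comment above.
    v = np.moveaxis(vel, axis, 0)
    v0, v1 = v[:-1], v[1:]
    return bool(np.any((v1 > 1.2 * v0) | (v1 < 0.8 * v0)))
```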
||||
|
||||
###################################################
|
||||
# model update parameters setting #
|
||||
###################################################
|
||||
model_update:
|
||||
max_iterations: 80 # maximum number of inversion iterations
|
||||
|
||||
step_length: 0.01 # the initial step length of model perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
|
||||
# parameters for optim_method 0 (gradient_descent)
|
||||
optim_method_0:
|
||||
# if step_method:1. if the angle between the current and the previous gradients is greater than step_length_gradient_angle, step size -> step length * step_length_change[0].
|
||||
# otherwise, step size -> step length * step_length_change[1].
|
||||
step_length_gradient_angle: 120 # default: 120.0
|
||||
step_length_change: [0.5, 1.41] # default: [0.5,1.2]
|
||||
Kdensity_coe: 0.3 # default: 0.0, range: 0.0 - 1.0
|
||||
|
||||
# parameters for smooth method 0 (multigrid model parametrization)
|
||||
# inversion grid can be viewed in OUTPUT_FILES/inversion_grid.txt
|
||||
n_inversion_grid: 5 # number of inversion grid sets
|
||||
|
||||
uniform_inv_grid_dep: false # true if use uniform inversion grid for dep, false if use flexible inversion grid
|
||||
uniform_inv_grid_lat: false # true if use uniform inversion grid for lat, false if use flexible inversion grid
|
||||
uniform_inv_grid_lon: false # true if use uniform inversion grid for lon, false if use flexible inversion grid
|
||||
|
||||
# settings for flexible inversion grid
|
||||
dep_inv: [-50, -25, 0, 50, 100, 150, 200, 275, 350, 425, 500, 600,700] # inversion grid for vel in depth (km)
|
||||
lat_inv: [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] # inversion grid for vel in latitude (degree)
|
||||
lon_inv: [95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108] # inversion grid for vel in longitude (degree)
|
||||
trapezoid: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
# if we want to use another inversion grid for inverting anisotropy, set invgrid_ani: true (default: false)
|
||||
invgrid_ani: false
|
||||
|
||||
# ---------- flexible inversion grid setting for anisotropy ----------
|
||||
# settings for flexible inversion grid for anisotropy
|
||||
dep_inv_ani: [-5, -2, 0, 3, 7, 12, 17, 23, 30, 38, 47, 57] # inversion grid for ani in depth (km)
|
||||
lat_inv_ani: [-2.8, -2.3, -1.8, -1.3, -0.8, -0.3, 0.2, 0.7, 1.2, 1.7, 2.2, 2.7] # inversion grid for ani in latitude (degree)
|
||||
lon_inv_ani: [-1.2, -0.9, -0.6, -0.3, 0, 0.3, 0.6, 0.9, 1.2] # inversion grid for ani in longitude (degree)
|
||||
trapezoid_ani: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
|
||||
# Change trapezoid and trapezoid_ani carefully. Use a trapezoid inversion grid only if you really want the inversion grid spacing to increase with depth, to account for the poorer data coverage at greater depths.
|
||||
# The trapezoid inversion grid node with index (i,j,k) in longitude, latitude, and depth is defined as:
|
||||
# if dep_inv[k] < trapezoid[1], lon = lon_inv[i];
|
||||
# lat = lat_inv[j];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[1] <= dep_inv[k] < trapezoid[2], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[2] <= dep_inv[k], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# The shape of the trapezoid inversion grid (x) looks like:
|
||||
#
|
||||
# lon_inv[0] [1] [2] [3] [4]
|
||||
# |<-------- (lon_inv[end] - lon_inv[0]) ---->|
|
||||
# dep_inv[0] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[1] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[2] = trapezoid[1] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[3] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[4] = trapezoid[2] / x x x x x \
|
||||
# | |
|
||||
# dep_inv[5] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[6] | x x x x x |
|
||||
# |<---- trapezoid[0]* (lon_inv[end] - lon_inv[0]) ------>|
|
||||
|
||||
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time: false # 'true' for using absolute traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time: true # 'true' for using common source differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time: false # 'true' for using common receiver differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- inversion parameters --------------
|
||||
update_slowness : true # update slowness (velocity) or not. default: true
|
||||
update_azi_ani : false # update azimuthal anisotropy (xi, eta) or not. default: false
|
||||
|
||||
use_sta_correction: true
|
||||
# initial_sta_correction_file: 4_initial_station_correct/station_correction_file_step_0010.dat # the path of initial station correction
|
||||
step_length_sta_correction: 0.01 # step length relate to the update of station correction terms
|
||||
29
examples/realcase2_teleseismic_tomography_Thailand/README.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# Real case of teleseismic tomography in Thailand and adjacent areas
|
||||
|
||||
This is a real case to invert common-source differential arrival times for velocity heterogeneity in Thailand and adjacent areas
|
||||
|
||||
Reference:
|
||||
|
||||
[1] J. Chen, S. Wu, M. Xu, M. Nagaso, J. Yao, K. Wang, T. Li, Y. Bai, and P. Tong, Adjoint-state teleseismic traveltime tomography: method and application to Thailand in Indochina Peninsula. J.Geophys. Res. Solid Earth, 128(2023), e2023JB027348.
|
||||
https://doi.org/10.1029/2023JB027348
|
||||
|
||||
[2] J. Chen, M. Nagaso, M. Xu, and P. Tong, TomoATT: An open-source package for Eikonal equation-based adjoint-state traveltime tomography for seismic velocity and azimuthal anisotropy, submitted.
|
||||
https://doi.org/10.48550/arXiv.2412.00031
|
||||
|
||||
Python modules are required to initiate the inversion and to plot final results:
|
||||
- h5py
|
||||
- PyTomoATT
|
||||
- Pygmt
|
||||
- gmt
|
||||
|
||||
Run this example:
|
||||
|
||||
1. Run bash script `bash run_this_example.sh` to execute the test.
|
||||
|
||||
2. After inversion, run `plot_output.py` to plot the results.
|
||||
|
||||
The imaging results:
|
||||
|
||||

|
||||
|
||||
|
||||
@@ -0,0 +1,260 @@
|
||||
# %%
|
||||
import pygmt
|
||||
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")
|
||||
|
||||
import os
|
||||
|
||||
# %%
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.data import ATTData
|
||||
import numpy as np
|
||||
|
||||
# %%
|
||||
# read model files
|
||||
|
||||
Ngrid = [121,121,121]
|
||||
data_file = '2_models/model_init_N%d_%d_%d.h5'%(Ngrid[0],Ngrid[1],Ngrid[2])
|
||||
par_file = '3_input_params/input_params_real.yaml'
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
init_model = model.to_xarray()
|
||||
|
||||
data_file = 'OUTPUT_FILES/OUTPUT_FILES_real/final_model.h5'
|
||||
model = ATTModel.read(data_file, par_file)
|
||||
inv_model = model.to_xarray()
|
||||
|
||||
# %%
|
||||
# # read earthquakes and stations
|
||||
|
||||
# from pytomoatt.src_rec import SrcRec
|
||||
|
||||
# # read src_rec_file
|
||||
# sr = SrcRec.read("1_src_rec_files/src_rec_file.dat")
|
||||
|
||||
# # get the coordinates of the stations and earthquakes
|
||||
# stations = sr.receivers[['stlo','stla','stel']].values.T
|
||||
# earthquakes = sr.sources[['evlo','evla','evdp']].values.T
|
||||
|
||||
# print(stations.shape)
|
||||
# print(earthquakes.shape)
|
||||
|
||||
# %%
|
||||
# read earthquakes and stations
|
||||
|
||||
import sys
|
||||
sys.path.append('../utils')
|
||||
import functions_for_data as ffd
|
||||
|
||||
ev, st = ffd.read_src_rec_file('1_src_rec_files/src_rec_file.dat')
|
||||
# earthquake location
|
||||
ev_lon, ev_lat, ev_dep , _ = ffd.data_lon_lat_dep_wt_ev(ev)
|
||||
# station location
|
||||
st_lon, st_lat, _, _ = ffd.data_lon_lat_ele_wt_st(ev,st)
|
||||
|
||||
# %%
|
||||
# load topography
|
||||
region = [97,106,12,21]
|
||||
grid_topo = pygmt.datasets.load_earth_relief(resolution="01m", region=region)
|
||||
grid_gra = pygmt.grdgradient(grid = grid_topo, azimuth = 0)
|
||||
|
||||
# %%
|
||||
def line_read(file):
|
||||
doc=open(file,'r')
|
||||
file = doc.readlines()
|
||||
doc.close()
|
||||
lat = []; lon = [];
|
||||
for info in file:
|
||||
tmp = info.split()
|
||||
lon.append(float(tmp[0]))
|
||||
lat.append(float(tmp[1]))
|
||||
return((lat,lon))
|
||||
|
||||
# %%
|
||||
# plot imaging results
|
||||
|
||||
fig = pygmt.Figure()
|
||||
try:
|
||||
os.mkdir("img")
|
||||
except:
|
||||
pass
|
||||
|
||||
|
||||
# ------------------ Sub fig 1. topography ------------------
|
||||
region = [97,106,12,21]
|
||||
projection = "B101.5/16.5/12/21/10c"
|
||||
frame = ["xa2+lLongitude", "ya2+lLatitude", "nSWe"]
|
||||
spacing = [0.1, 0.1]
|
||||
|
||||
# topography
|
||||
pygmt.makecpt(cmap="globe", series=[-4000,4000], background = True)
|
||||
fig.grdimage(grid=grid_topo, shading = grid_gra, projection=projection, frame=frame,region=region)
|
||||
|
||||
# station
|
||||
fig.plot(x = st_lon, y = st_lat, style = "t0.4c", fill = "blue", pen = "1p,white", label = "Station")
|
||||
|
||||
|
||||
# tectonic setting
|
||||
(lat,lon) = line_read('tectonics/Khorat_new.txt') # Khorat Plateau
|
||||
fig.plot(x = lon, y = lat, pen = "1p,black")
|
||||
fig.text(text = "Khorat", x = 103.5, y = 17.5, font = "15p,Helvetica-Bold,black", angle = 0)
|
||||
fig.text(text = "Plateau", x = 103.5, y = 16.5, font = "15p,Helvetica-Bold,black", angle = 0)
|
||||
(lat,lon) = line_read('tectonics/WangChaoFault.txt') # Wang-Chao Fault
|
||||
fig.plot(x = lon, y = lat, pen = "1p,black,-")
|
||||
fig.text(text = "WCF", x = 100, y = 16, font = "20p,Helvetica-Bold,white=1p", angle = 315)
|
||||
(lat,lon) = line_read('tectonics/3PagodasFault.txt') # Three Pagodas Fault
|
||||
fig.plot(x = lon, y = lat, pen = "1p,black,-")
|
||||
fig.text(text = "TPF", x = 98.5, y = 14.5, font = "20p,Helvetica-Bold,white=1p", angle = 315)
|
||||
(lat,lon) = line_read('tectonics/DBPF.txt') # Dien Bien Phu Fault
|
||||
fig.plot(x = lon, y = lat, pen = "1p,black,-")
|
||||
fig.text(text = "DBPF", x = 102, y = 19.5, font = "20p,Helvetica-Bold,white=1p", angle = 55)
|
||||
(lat,lon) = line_read('tectonics/SongMaSuture.txt') # Song Ma Suture
|
||||
fig.plot(x = lon, y = lat, pen = "1p,black,-")
|
||||
fig.text(text = "Shan-Thai", x = 99, y = 20, font = "18p,Helvetica-Bold,black=0.5p,white", angle = 0)
|
||||
fig.text(text = "Block", x = 99, y = 19.3, font = "18p,Helvetica-Bold,black=0.5p,white", angle = 0)
|
||||
fig.text(text = "Indochina Block", x = 103.5, y = 15, font = "18p,Helvetica-Bold,black=0.5p,white", angle = 315)
|
||||
|
||||
|
||||
fig.shift_origin(xshift= 1, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a%f"%(4000),"y+lElevation (m)"], position="+w4c/0.3c+h")
|
||||
fig.shift_origin(xshift=-1, yshift=+1.5)
|
||||
|
||||
fig.shift_origin(xshift=0, yshift=-13)
|
||||
|
||||
|
||||
# ------------------ Sub fig 2. earthquakes ------------------
|
||||
|
||||
region = "g"
|
||||
projection = "E101/16/90/10c" # centerlon/centerlat/distnace_range/fig_size
|
||||
frame = ["ya180"]
|
||||
spacing = [0.1, 5]
|
||||
|
||||
fig.basemap(region=region, projection=projection, frame=frame)
|
||||
fig.coast(region=region, projection=projection, frame=frame, water="white", land="gray", A=10000)
|
||||
|
||||
fig.plot(x=101.5, y=16.5, pen="1p,black,-", style="E-60d")
|
||||
fig.plot(x=101.5, y=16.5, pen="1p,black,-", style="E-120d")
|
||||
fig.plot(x=101.5, y=16.5, pen="1p,black,-", style="E-180d")
|
||||
fig.text(x = [101.5, 101.5, 101.5], y = [-8.0, -38.0, -68], text = ['30', '60', '90'], font="13p")
|
||||
|
||||
fig.plot(x=[97,97,106,106,97], y=[11,21,21,11,11], pen="1p,red")
|
||||
pygmt.makecpt(cmap="jet", series=[0, 100], background=True)
|
||||
fig.plot(x = ev_lon, y = ev_lat, size = [0.4]*len(ev_lon), fill = ev_dep, style = "a", cmap = True, pen = "0.5p,white")
|
||||
|
||||
|
||||
fig.shift_origin(xshift= 1, yshift=-1)
|
||||
fig.colorbar(frame = ["a%f"%(50),"y+lFocal depth (km)"], position="+w4c/0.3c+h")
|
||||
fig.shift_origin(xshift=-1, yshift=+1)
|
||||
|
||||
fig.shift_origin(xshift= 13, yshift= 13)
|
||||
|
||||
|
||||
# ------------------ Sub fig 3. depth and vertical profile ------------------
|
||||
|
||||
region = [97,106,12,21]
|
||||
projection = "B101.5/16.5/12/21/10c"
|
||||
frame = ["xa2+lLongitude", "ya2+lLatitude", "nSWe"]
|
||||
spacing = [0.1, 0.1]
|
||||
|
||||
depth_list = [100,200,300,400]
|
||||
start_list = [ [97, 19], [97, 17.2], [101.8, 12] ]
|
||||
end_list = [ [106, 15], [106, 13.2], [101.8, 21] ]
|
||||
|
||||
# depth profiles
|
||||
for idepth, depth in enumerate(depth_list):
|
||||
|
||||
# read models
|
||||
vel_init = init_model.interp_dep(depth, field='vel')
|
||||
vel_inv = inv_model.interp_dep(depth, field='vel')
|
||||
|
||||
if idepth == 3:
|
||||
fig.shift_origin(xshift=-39, yshift=-13)
|
||||
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-2, 2], background=True)
|
||||
x = vel_inv[:,0]; y = vel_inv[:,1]; value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2]*100
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=spacing,region=region)
|
||||
fig.grdimage(frame=frame,grid = grid,projection=projection, region=region) # nan_transparent may work
|
||||
|
||||
(lat,lon) = line_read('tectonics/Khorat_new.txt') # Khorat Plateau
|
||||
fig.plot(x = lon, y = lat, pen = "1p,black")
|
||||
|
||||
# vertical profile location
|
||||
fig.plot(x = [start_list[0][0],end_list[0][0]], y = [start_list[0][1],end_list[0][1],], pen = "2p,green,-")
|
||||
fig.text(text = "A", x = start_list[0][0] + 0.5, y = start_list[0][1], font = "18p,Helvetica-Bold,black", fill = "lightblue", angle = 0)
|
||||
fig.text(text = "A@+'@+", x = end_list[0][0] - 0.5, y = end_list[0][1] + 0.5, font = "18p,Helvetica-Bold,black", fill = "lightblue", angle = 0)
|
||||
|
||||
fig.plot(x = [start_list[1][0],end_list[1][0]], y = [start_list[1][1],end_list[1][1],], pen = "2p,green,-")
|
||||
fig.text(text = "B", x = start_list[1][0] + 0.5, y = start_list[1][1], font = "18p,Helvetica-Bold,black", fill = "lightblue", angle = 0)
|
||||
fig.text(text = "B@+'@+", x = end_list[1][0] - 0.5, y = end_list[1][1] + 0.5, font = "18p,Helvetica-Bold,black", fill = "lightblue", angle = 0)
|
||||
|
||||
fig.plot(x = [start_list[2][0],end_list[2][0]], y = [start_list[2][1],end_list[2][1],], pen = "2p,green,-")
|
||||
fig.text(text = "C", x = start_list[2][0] - 0.5, y = start_list[2][1] + 0.5, font = "18p,Helvetica-Bold,black", fill = "lightblue", angle = 0)
|
||||
fig.text(text = "C@+'@+", x = end_list[2][0] - 0.5, y = end_list[2][1] - 0.5, font = "18p,Helvetica-Bold,black", fill = "lightblue", angle = 0)
|
||||
|
||||
# depth label
|
||||
fig.text(text="%d km"%(depth), x = 98 , y = 12.5, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
|
||||
fig.shift_origin(xshift=13)
|
||||
|
||||
fig.shift_origin(yshift=6)
|
||||
|
||||
# vertical profiles
|
||||
for iprof in range(len(start_list)):
|
||||
|
||||
# generate topography data
|
||||
Npoints = 100
|
||||
point_x = np.linspace(start_list[iprof][0],end_list[iprof][0],Npoints)
|
||||
point_y = np.linspace(start_list[iprof][1],end_list[iprof][1],Npoints)
|
||||
points = np.hstack((point_x.reshape(-1,1),point_y.reshape(-1,1)))
|
||||
topo = np.array(pygmt.grdtrack(points=points, grid=grid_topo)[2])
|
||||
topo_dis = [0]
|
||||
for ip in range(1, Npoints):
|
||||
dis = ffd.cal_dis(point_y[0], point_x[0], point_y[ip], point_x[ip])
|
||||
topo_dis.append(dis)
|
||||
topo_dis = np.array(topo_dis)
|
||||
|
||||
# read models
|
||||
vel_init_sec = init_model.interp_sec(start_list[iprof], end_list[iprof], field='vel', val=10)
|
||||
vel_inv_sec = inv_model.interp_sec(start_list[iprof], end_list[iprof], field='vel', val=10)
|
||||
|
||||
|
||||
# plot topography
|
||||
max_dis = np.max(vel_init_sec[:,2])
|
||||
|
||||
region = [0,max_dis,0,2000]
|
||||
projection = "X%f/1c"%(max_dis/400*4)
|
||||
frame = ["ya2000+lElevation (m)", "sW"]
|
||||
|
||||
fig.shift_origin(yshift=4)
|
||||
fig.basemap(region=region, projection=projection, frame=frame)
|
||||
fig.plot(x = topo_dis, y = topo, pen = "1p,black", frame = frame, projection = projection, region = region)
|
||||
fig.shift_origin(yshift=-4)
|
||||
|
||||
# plot model
|
||||
region = [0,max_dis,0,400]
|
||||
projection = "X%f/-4c"%(max_dis/400*4)
|
||||
frame = ["xa300+lDistance (km)", "ya100+lDepth (km)", "nSWe"]
|
||||
spacing = [10, 5]
|
||||
|
||||
x_sec = vel_inv_sec[:,2]; y_sec = vel_inv_sec[:,3]; value_sec = (vel_inv_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4]*100
|
||||
grid = pygmt.surface(x=x_sec, y=y_sec, z=value_sec, spacing=spacing,region=region)
|
||||
fig.grdimage(frame=frame,grid = grid,projection=projection, region=region) # nan_transparent may work
|
||||
|
||||
label_list = ['A', 'B', 'C']
|
||||
fig.text(text = "%s"%(label_list[iprof]), x = 50, y = 50 , font = "18p,Helvetica-Bold,black", fill = "lightblue", angle = 0)
|
||||
fig.text(text = "%s@+'@+"%(label_list[iprof]), x = np.max(x) - 50, y = 50, font = "18p,Helvetica-Bold,black", fill = "lightblue", angle = 0)
|
||||
|
||||
|
||||
if (iprof == 0):
|
||||
fig.shift_origin(xshift=0, yshift=-6.5)
|
||||
elif (iprof == 1):
|
||||
fig.shift_origin(xshift=13, yshift=6.5)
|
||||
|
||||
fig.shift_origin(xshift= 2, yshift=-2.5)
|
||||
fig.colorbar(frame = ["a%f"%(2),"y+ldlnVp (%)"], position="+w4c/0.3c+h")
|
||||
fig.shift_origin(xshift=-2, yshift=+2.5)
|
||||
|
||||
fig.show()
|
||||
|
||||
fig.savefig("img/imaging_result.png")
|
||||
|
||||
|
||||
@@ -0,0 +1,45 @@
|
||||
# %%
|
||||
# download src_rec_files from Zenodo
|
||||
import os
|
||||
import requests
|
||||
|
||||
url = 'https://zenodo.org/records/14092478/files/src_rec_file.dat?download=1'
|
||||
|
||||
path = "1_src_rec_files/src_rec_file.dat"
|
||||
|
||||
# check file existence
|
||||
if not os.path.exists(path):
|
||||
try:
|
||||
os.mkdir("1_src_rec_files")
|
||||
except:
|
||||
pass
|
||||
print("Downloading src_rec_file.dat from Zenodo...")
|
||||
response = requests.get(url, stream=True)
|
||||
with open(path, 'wb') as out_file:
|
||||
out_file.write(response.content)
|
||||
print("Download complete.")
|
||||
else:
|
||||
print("src_rec_file.dat already exists.")
|
||||
|
||||
# %%
|
||||
# download initial model from Zenodo
|
||||
|
||||
url = 'https://zenodo.org/records/14092478/files/model_init_N121_121_121.h5?download=1'
|
||||
|
||||
path = "2_models/model_init_N121_121_121.h5"
|
||||
|
||||
# check file existence
|
||||
if not os.path.exists(path):
|
||||
try:
|
||||
os.mkdir("2_models")
|
||||
except:
|
||||
pass
|
||||
print("Downloading model_init_N121_121_121.h5 from Zenodo...")
|
||||
response = requests.get(url, stream=True)
|
||||
with open(path, 'wb') as out_file:
|
||||
out_file.write(response.content)
|
||||
print("Download complete.")
|
||||
else:
|
||||
print("model_init_N121_121_121.h5 already exists.")
|
||||
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Step 1: Generate necessary input files
|
||||
python prepare_input_files.py
|
||||
|
||||
# Step 2: Run inversion
|
||||
# # for WSL
|
||||
# mpirun -n 8 --allow-run-as-root --oversubscribe ../../build/bin/TOMOATT -i 3_input_params/input_params_real.yaml
|
||||
# for Linux
|
||||
# mpirun -n 8 ../../build/bin/TOMOATT -i 3_input_params/input_params_real.yaml
|
||||
# for conda install
|
||||
mpirun -n 8 TOMOATT -i 3_input_params/input_params_real.yaml
|
||||
|
||||
# # Step 3 (Optional): Plot the results
|
||||
# python plot_output.py
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
97.716435 15.996933
|
||||
97.909999 15.704275
|
||||
98.075912 15.388543
|
||||
98.283303 15.065462
|
||||
98.504520 14.792299
|
||||
98.753389 14.414982
|
||||
99.306431 13.919170
|
||||
99.569126 13.657715
|
||||
99.831821 13.344572
|
||||
@@ -0,0 +1,12 @@
|
||||
100.26019 17.132888
|
||||
100.51411 17.490642
|
||||
100.79624 17.903402
|
||||
101.10658 18.261329
|
||||
101.47335 18.591967
|
||||
101.75549 19.004727
|
||||
102.00940 19.362481
|
||||
102.29154 19.802702
|
||||
102.51724 20.242749
|
||||
102.74295 20.627877
|
||||
102.91223 21.122673
|
||||
103.02508 21.919356
|
||||
@@ -0,0 +1,74 @@
|
||||
101.3217 15.25436
|
||||
101.3610 15.02633
|
||||
101.4085 14.66495
|
||||
101.6319 14.56644
|
||||
101.6012 14.38869
|
||||
101.4514 14.26901
|
||||
101.7127 14.13520
|
||||
101.9088 14.04399
|
||||
102.1729 14.00737
|
||||
102.4503 14.00100
|
||||
102.7636 14.00578
|
||||
103.0485 14.13474
|
||||
103.3082 14.27712
|
||||
103.5336 14.33471
|
||||
103.7845 14.37077
|
||||
103.9997 14.34649
|
||||
104.2485 14.32079
|
||||
104.4734 14.33280
|
||||
104.6885 14.37674
|
||||
104.9102 14.34013
|
||||
105.1688 14.31806
|
||||
105.3968 14.38630
|
||||
105.5182 14.48660
|
||||
105.5693 14.77796
|
||||
105.8248 14.91528
|
||||
106.1238 14.87758
|
||||
106.3348 14.64013
|
||||
106.4688 14.60983
|
||||
106.7040 14.63849
|
||||
106.9841 14.69472
|
||||
107.1072 14.88782
|
||||
106.9818 15.16007
|
||||
106.8467 15.43551
|
||||
106.7018 15.70776
|
||||
106.4761 15.91474
|
||||
106.7336 16.04609
|
||||
106.9007 16.14401
|
||||
106.9118 16.31668
|
||||
106.7038 16.57524
|
||||
106.4385 16.64211
|
||||
106.2010 16.67623
|
||||
106.0120 16.46044
|
||||
105.8274 16.65030
|
||||
105.8194 16.91355
|
||||
105.5656 17.01331
|
||||
105.2865 17.09928
|
||||
104.9742 17.44792
|
||||
104.8155 17.65625
|
||||
104.6904 17.84917
|
||||
104.4882 18.06029
|
||||
104.2648 18.23726
|
||||
104.2338 18.42014
|
||||
104.0404 18.69778
|
||||
103.8294 18.71916
|
||||
103.6035 18.82287
|
||||
103.4194 18.79012
|
||||
103.1891 18.75828
|
||||
102.9576 18.78853
|
||||
102.7519 18.78060
|
||||
102.4620 18.70688
|
||||
102.2884 18.31131
|
||||
102.3415 17.97336
|
||||
102.3233 17.61750
|
||||
102.3959 17.36994
|
||||
102.5198 17.16424
|
||||
102.4485 16.89594
|
||||
102.2198 17.03361
|
||||
102.0690 16.82544
|
||||
101.9004 16.52085
|
||||
101.7902 16.17219
|
||||
101.5551 16.16099
|
||||
101.3962 15.84821
|
||||
101.3255 15.53741
|
||||
101.3217 15.25436
|
||||
@@ -0,0 +1,7 @@
|
||||
103.42006 21.261522
|
||||
103.75862 21.015416
|
||||
104.12539 20.687015
|
||||
104.54859 20.386247
|
||||
104.94357 20.140313
|
||||
105.33856 19.811998
|
||||
105.67712 19.565892
|
||||
@@ -0,0 +1,35 @@
|
||||
97.160627 18.667449
|
||||
97.439913 18.296889
|
||||
97.702608 18.010759
|
||||
97.979130 17.718541
|
||||
98.283303 17.463175
|
||||
98.550606 17.271720
|
||||
98.765678 17.102918
|
||||
99.057562 16.870374
|
||||
99.306431 16.669104
|
||||
99.555300 16.377656
|
||||
99.831821 16.064504
|
||||
100.03921 15.742171
|
||||
100.21895 15.443462
|
||||
100.39869 15.086120
|
||||
100.56460 14.752446
|
||||
100.78582 14.447648
|
||||
100.96556 14.282677
|
||||
101.60156 13.889472
|
||||
101.90573 13.651846
|
||||
102.20990 13.442356
|
||||
102.51408 13.201059
|
||||
102.80719 12.957404
|
||||
103.10972 12.693070
|
||||
103.48190 12.438190
|
||||
103.78607 12.172426
|
||||
104.09025 11.987404
|
||||
104.36677 11.761654
|
||||
104.62946 11.466553
|
||||
104.91981 11.204539
|
||||
105.18250 10.882527
|
||||
105.43137 10.599681
|
||||
105.72172 10.344518
|
||||
106.01207 10.087398
|
||||
106.30241 9.8312565
|
||||
106.50981 9.6872272
|
||||
@@ -0,0 +1,105 @@
|
||||
# %%
|
||||
from pytomoatt.model import ATTModel
|
||||
import os
|
||||
import numpy as np
|
||||
import h5py
|
||||
# %% [markdown]
|
||||
# # Step 1. Generate the ATT model based on the crust1.0 model.
|
||||
|
||||
# %%
|
||||
# generate the .h5 model for TomoATT based on the crust1.0 model. Nearest extrapolation is used.
|
||||
|
||||
param_file = "./3_input_params/input_params_real.yaml"
|
||||
am_crust1p0 = ATTModel(param_file)
|
||||
am_crust1p0.grid_data_crust1(type="vp")
|
||||
|
||||
# %% [markdown]
|
||||
# # Step 2. Generate the ATT model based on ak135 model.
|
||||
|
||||
# %%
|
||||
# Step 2. Generate the ATT model based on ak135 model.
|
||||
|
||||
# ak135.h5 has a three-column dataset 'model'. First column: depth (in km), second column: Vp (in km/s), third column: Vs (in km/s).
|
||||
# a text version of the ak135 model can be found in Kennett et al. (1995):
|
||||
# Kennett, B. L., Engdahl, E. R., & Buland, R. (1995). Constraints on seismic velocities in the Earth from traveltimes. Geophysical Journal International, 122(1), 108-124.
|
||||
|
||||
# Load the 1D ak135 model from the .h5 file.
|
||||
with h5py.File('ak135.h5', 'r') as f:
|
||||
points_ak135 = f['model'][:]
|
||||
|
||||
am_ak135 = ATTModel(param_file)
|
||||
|
||||
# interpolate the 1D ak135 velocity model to the depths of the ATT model.
|
||||
vel_1d = np.interp(am_ak135.depths, points_ak135[:,0], points_ak135[:,1], left=points_ak135[0,1], right=points_ak135[-1,1])
|
||||
|
||||
# Set the 3D velocity model by tiling the 1D velocity model along lat and lon directions.
|
||||
am_ak135.vel = np.tile(vel_1d[:, None, None], (1, am_ak135.n_rtp[1], am_ak135.n_rtp[2]))
|
||||
|
||||
|
||||
# %% [markdown]
|
||||
# # Step 3. Combine ak135 model with crust1.0 model
|
||||
|
||||
# %%
|
||||
# 1. set two depths
|
||||
# if depth < depth_1, vel = crust1p0
|
||||
# if depth_1 <= depth <= depth_2, vel = linear_interp between crust1p0 and ak135
|
||||
# if depth > depth_2, vel = ak135
|
||||
|
||||
am_combined = ATTModel(param_file)
|
||||
|
||||
depth_1 = 35.0
|
||||
depth_2 = 70.0
|
||||
|
||||
ratio = (am_ak135.depths - depth_1) / (depth_2 - depth_1)
|
||||
ratio = np.clip(ratio, 0.0, 1.0)
|
||||
ratio_3d = np.tile(ratio[:, None, None], (1, am_ak135.n_rtp[1], am_ak135.n_rtp[2]))
|
||||
|
||||
# linear interpolation
|
||||
am_combined.vel = am_crust1p0.vel * (1 - ratio_3d) + am_ak135.vel * ratio_3d
|
||||
|
||||
# %% [markdown]
|
||||
# # Step 4. post processing (OPTIONAL)
|
||||
|
||||
# %%
|
||||
am_processed = am_combined.copy()
|
||||
|
||||
# 1. (OPTIONAL) monotonic increase check
|
||||
# Ensure that the velocity model increases monotonically with depth.
|
||||
am_processed.vel[::-1,:,:] = np.maximum.accumulate(am_processed.vel[::-1,:,:], axis=0)
|
||||
|
||||
# 2. (OPTIONAL) Gaussian smoothing to the combined model to avoid sharp discontinuities.
|
||||
## Upgrade PyTomoATT to version 0.2.10 or later to use this function.
|
||||
am_processed.smooth(sigma=am_processed.d_rtp, unit_deg=True, mode='nearest') # standard deviation for Gaussian kernel along each axis (ddep, dlat, dlon)
|
||||
|
||||
|
||||
# %%
|
||||
# output as .h5 file
|
||||
n_rtp = am_processed.n_rtp
|
||||
fname = f"constant_velocity_N{n_rtp[0]:d}_{n_rtp[1]:d}_{n_rtp[2]:d}_PyTomoATT.h5"
|
||||
am_processed.write(fname)
|
||||
|
||||
# %%
|
||||
# visualization of the central lat-lon slice
|
||||
import matplotlib.pyplot as plt
|
||||
dep = am_processed.depths
|
||||
vel = am_processed.vel[:, am_processed.n_rtp[1]//2, am_processed.n_rtp[2]//2]
|
||||
lat = am_processed.latitudes[am_processed.n_rtp[1]//2]
|
||||
lon = am_processed.longitudes[am_processed.n_rtp[2]//2]
|
||||
fig = plt.figure(figsize=(6,6))
|
||||
ax = fig.add_subplot(1,1,1)
|
||||
ax.plot(vel, dep, label='Velocity', color='blue')
|
||||
ax.invert_yaxis()
|
||||
ax.set_xlabel('Vp (km/s)', fontsize=16)
|
||||
ax.set_ylabel('Depth (km)', fontsize=16)
|
||||
ax.tick_params(axis='x', labelsize=16)
|
||||
ax.tick_params(axis='y', labelsize=16)
|
||||
|
||||
ax.set_title(f'Velocity Profile at Lat: {lat:.2f}, Lon: {lon:.2f}', fontsize=16)
|
||||
ax.grid()
|
||||
ax.legend(fontsize=16)
|
||||
plt.show()
|
||||
|
||||
os.makedirs("figs", exist_ok=True)
|
||||
fig.savefig("figs/velocity_profile_lat%.2f_lon%.2f.png"%(lat, lon), facecolor='white', edgecolor='white', bbox_inches='tight')
|
||||
|
||||
|
||||
@@ -0,0 +1,186 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-50, 550] # depth in km
|
||||
min_max_lat: [10.5, 22.5] # latitude in degree
|
||||
min_max_lon: [95.5, 107.5] # longitude in degree
|
||||
n_rtp: [121, 121, 121] # number of nodes in depth,latitude,longitude direction
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: 1_src_rec_files/src_rec_file.dat # source receiver file path
|
||||
swap_src_rec: false # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_init_N121_121_121.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallel the sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallel the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_real # path to output directory (default is ./OUTPUT_FILES/)
|
||||
output_source_field: false # True: output the traveltime field and adjoint field of all sources at each iteration. Default: false. File: 'out_data_sim_group_X'.
|
||||
output_kernel: false
|
||||
output_final_model: true # True: output merged final model. This file can be used as the input model for TomoATT. Default: true. File: 'final_model.h5'.
|
||||
output_middle_model: false # True: output merged intermediate models during inversion. This file can be used as the input model for TomoATT. Default: false. File: 'middle_model_step_XXXX.h5'
|
||||
output_in_process: false # True: output at each inv iteration, otherwise, only output step 0, Niter-1, Niter. Default: true. File: 'out_data_sim_group_0'.
|
||||
output_in_process_data: false # True: output src_rec_file at each inv iteration, otherwise, only output step 0, Niter-2, Niter-1. Default: true. File: 'src_rec_file_step_XXXX.dat'
|
||||
single_precision_output: false # True: output results in single precision. Default: false.
|
||||
verbose_output_level: 0 # output internal parameters, (to do)
|
||||
output_file_format: 0 # 0: hdf5, 1: ascii
|
||||
|
||||
# output files:
|
||||
# File: 'out_data_grid.h5'. Keys: ['Mesh']['elem_conn'], element index;
|
||||
# ['Mesh']['node_coords_p'], phi coordinates of nodes;
|
||||
# ['Mesh']['node_coords_t'], theta coordinates of nodes;
|
||||
# ['Mesh']['node_coords_r'], r coordinates of nodes;
|
||||
# ['Mesh']['node_coords_x'], phi coordinates of elements;
|
||||
# ['Mesh']['node_coords_y'], theta coordinates of elements;
|
||||
# ['Mesh']['node_coords_z'], r coordinates of elements;
|
||||
# File: 'out_data_sim_group_0'. Keys: ['model']['vel_inv_XXXX'], velocity model at iteration XXXX;
|
||||
# ['model']['xi_inv_XXXX'], xi model at iteration XXXX;
|
||||
# ['model']['eta_inv_XXXX'], eta model at iteration XXXX
|
||||
# ['model']['Ks_inv_XXXX'], sensitivity kernel related to slowness at iteration XXXX
|
||||
# ['model']['Kxi_inv_XXXX'], sensitivity kernel related to xi at iteration XXXX
|
||||
# ['model']['Keta_inv_XXXX'], sensitivity kernel related to eta at iteration XXXX
|
||||
# ['model']['Ks_density_inv_XXXX'], kernel density of Ks at iteration XXXX
|
||||
# ['model']['Kxi_density_inv_XXXX'], kernel density of Kxi at iteration XXXX
|
||||
# ['model']['Keta_density_inv_XXXX'], kernel density of Keta at iteration XXXX
|
||||
# ['model']['Ks_over_Kden_inv_XXXX'], slowness kernel over kernel density at iteration XXXX
|
||||
# ['model']['Kxi_over_Kden_inv_XXXX'], xi kernel over kernel density at iteration XXXX
|
||||
# ['model']['Keta_over_Kden_inv_XXXX'], eta kernel over kernel density at iteration XXXX
|
||||
# ['model']['Ks_update_inv_XXXX'], slowness kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Kxi_update_inv_XXXX'], xi kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['model']['Keta_update_inv_XXXX'], eta kernel over kernel density at iteration XXXX, smoothed by inversion grid
|
||||
# ['1dinv']['vel_1dinv_inv_XXXX'], 2d velocity model at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['r_1dinv'], r coordinates (depth), in 1d inversion mode
|
||||
# ['1dinv']['t_1dinv'], t coordinates (epicenter distance), in 1d inversion mode
|
||||
# File: 'src_rec_file_step_XXXX.dat' or 'src_rec_file_forward.dat'. The synthetic traveltime data file.
|
||||
# File: 'final_model.h5'. Keys: ['eta'], ['xi'], ['vel'], the final model.
|
||||
# File: 'middle_model_step_XXXX.h5'. Keys: ['eta'], ['xi'], ['vel'], the model at step XXXX.
|
||||
# File: 'inversion_grid.txt'. The location of inversion grid nodes
|
||||
# File: 'objective_function.txt'. The objective function value at each iteration
|
||||
# File: 'out_data_sim_group_X'. Keys: ['src_YYYY']['time_field_inv_XXXX'], traveltime field of source YYYY at iteration XXXX;
|
||||
# ['src_YYYY']['adjoint_field_inv_XXXX'], adjoint field of source YYYY at iteration XXXX;
|
||||
# ['1dinv']['time_field_1dinv_YYYY_inv_XXXX'], 2d traveltime field of source YYYY at iteration XXXX, in 1d inversion mode
|
||||
# ['1dinv']['adjoint_field_1dinv_YYYY_inv_XXXX'], 2d adjoint field of source YYYY at iteration XXXX, in 1d inversion mode
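#
# Quick check of any of these HDF5 outputs (an illustrative sketch, not used by TomoATT itself;
# it assumes the h5py Python package and the file names listed above):
#   import h5py
#   with h5py.File("OUTPUT_FILES/final_model.h5", "r") as f:
#       print(list(f.keys()))   # expected keys: ['eta', 'vel', 'xi']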
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
# 4 for 1d model inversion
|
||||
run_mode: 1
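# e.g. a synthetic test is typically run twice, as in the example parameter files elsewhere in this
# repository: once with run_mode: 0 in a perturbed (checkerboard) model to generate the synthetic
# data file 'src_rec_file_forward.dat', and once with run_mode: 1 in the initial model to invert it.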
|
||||
|
||||
have_tele_data: true # An error will be reported if this is false but a source outside the study region is used. Default: false.
|
||||
|
||||
###################################################
|
||||
# model update parameters setting #
|
||||
###################################################
|
||||
model_update:
|
||||
max_iterations: 80 # maximum number of inversion iterations
|
||||
|
||||
step_length: 0.01 # the initial step length of model perturbation. 0.01 means maximum 1% perturbation for each iteration.
|
||||
|
||||
# parameters for optim_method 0 (gradient_descent)
|
||||
optim_method_0:
|
||||
# if step_method: 1. If the angle between the current and previous gradients is greater than step_length_gradient_angle, the step size becomes step_length * step_length_change[0];
# otherwise, the step size becomes step_length * step_length_change[1].
|
||||
step_length_gradient_angle: 120 # default: 120.0
|
||||
step_length_change: [0.5, 1.41] # default: [0.5,1.2]
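# illustrative example with the values above (step_length = 0.01, step_length_change = [0.5, 1.41], step_length_gradient_angle = 120):
# if the angle between the current and previous gradients is 150 deg (> 120), the next step length is 0.01 * 0.5 = 0.005;
# if the angle is 90 deg (<= 120), the next step length is 0.01 * 1.41 = 0.0141.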
Kdensity_coe: 0.3 # default: 0.0, range: 0.0 - 1.0
|
||||
|
||||
# parameters for smooth method 0 (multigrid model parametrization)
|
||||
# inversion grid can be viewed in OUTPUT_FILES/inversion_grid.txt
|
||||
n_inversion_grid: 5 # number of inversion grid sets
|
||||
|
||||
uniform_inv_grid_dep: false # true: use a uniform inversion grid in depth; false: use the flexible inversion grid below
uniform_inv_grid_lat: false # true: use a uniform inversion grid in latitude; false: use the flexible inversion grid below
uniform_inv_grid_lon: false # true: use a uniform inversion grid in longitude; false: use the flexible inversion grid below
|
||||
|
||||
# settings for flexible inversion grid
|
||||
dep_inv: [-50, -25, 0, 50, 100, 150, 200, 275, 350, 425, 500, 600, 700] # inversion grid for vel in depth (km)
|
||||
lat_inv: [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] # inversion grid for vel in latitude (degree)
|
||||
lon_inv: [95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108] # inversion grid for vel in longitude (degree)
|
||||
trapezoid: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
# if we want to use another inversion grid for inverting anisotropy, set invgrid_ani: true (default: false)
|
||||
invgrid_ani: false
|
||||
|
||||
# ---------- flexible inversion grid setting for anisotropy ----------
|
||||
# settings for flexible inversion grid for anisotropy
|
||||
dep_inv_ani: [-5, -2, 0, 3, 7, 12, 17, 23, 30, 38, 47, 57] # inversion grid for ani in depth (km)
|
||||
lat_inv_ani: [-2.8, -2.3, -1.8, -1.3, -0.8, -0.3, 0.2, 0.7, 1.2, 1.7, 2.2, 2.7] # inversion grid for ani in latitude (degree)
|
||||
lon_inv_ani: [-1.2, -0.9, -0.6, -0.3, 0, 0.3, 0.6, 0.9, 1.2] # inversion grid for ani in longitude (degree)
|
||||
trapezoid_ani: [1, 0, 50] # usually set as [1.0, 0.0, 50.0] (default)
|
||||
|
||||
|
||||
# Change trapezoid and trapezoid_ani carefully. Use a trapezoid inversion grid only if you really want the inversion grid spacing to increase with depth, to account for the poorer data coverage at greater depths.
|
||||
# The trapezoid inversion grid node with index (i,j,k) in longitude, latitude, and depth is defined as:
|
||||
# if dep_inv[k] < trapezoid[1], lon = lon_inv[i];
|
||||
# lat = lat_inv[j];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[1] <= dep_inv[k] < trapezoid[2], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*(dep_inv[k]-trapezoid[1])/(trapezoid[2]-trapezoid[1])*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# if trapezoid[2] <= dep_inv[k], lon = mid_lon_inv+(lon_inv[i]-mid_lon_inv)*trapezoid[0];
|
||||
# lat = mid_lat_inv+(lat_inv[j]-mid_lat_inv)*trapezoid[0];
|
||||
# dep = dep_inv[k];
|
||||
# The shape of the trapezoid inversion grid (x) looks like:
|
||||
#
|
||||
# lon_inv[0] [1] [2] [3] [4]
|
||||
# |<-------- (lon_inv[end] - lon_inv[0]) ---->|
|
||||
# dep_inv[0] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[1] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[2] = trapezoid[1] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[3] / x x x x x \
|
||||
# / \
|
||||
# dep_inv[4] = trapezoid[2] / x x x x x \
|
||||
# | |
|
||||
# dep_inv[5] | x x x x x |
|
||||
# | |
|
||||
# dep_inv[6] | x x x x x |
|
||||
# |<---- trapezoid[0]* (lon_inv[end] - lon_inv[0]) ------>|
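#
# Worked example, applying the piecewise formula above literally with the values in this file
# (trapezoid = [1, 0, 50], lon_inv spanning 95 to 108 degrees, taking mid_lon_inv as the midpoint (95 + 108) / 2 = 101.5):
# at a node depth of 25 km (between trapezoid[1] = 0 and trapezoid[2] = 50), the scaling factor is (25 - 0) / (50 - 0) * 1 = 0.5,
# so the node at lon_inv[0] = 95 is placed at 101.5 + (95 - 101.5) * 0.5 = 98.25 degrees.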
# -------------- using absolute traveltime data --------------
|
||||
abs_time:
|
||||
use_abs_time: false # 'true' for using absolute traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common source differential traveltime data --------------
|
||||
cs_dif_time:
|
||||
use_cs_time: true # 'true' for using common source differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- using common receiver differential traveltime data --------------
|
||||
cr_dif_time:
|
||||
use_cr_time: false # 'true' for using common receiver differential traveltime data to update model parameters; 'false' for not using (no need to set parameters in this section)
|
||||
|
||||
# -------------- inversion parameters --------------
|
||||
update_slowness : true # update slowness (velocity) or not. default: true
|
||||
update_azi_ani : false # update azimuthal anisotropy (xi, eta) or not. default: false
|
||||
|
||||
use_sta_correction: true
|
||||
# initial_sta_correction_file: 4_initial_station_correct/station_correction_file_step_0010.dat # the path of initial station correction
|
||||
step_length_sta_correction: 0.01 # step length for updating the station correction terms
|
||||
BIN
examples/scripts_of_generate_community_model/ak135.h5
Normal file
BIN
examples/scripts_of_generate_community_model/ak135.h5
Normal file
Binary file not shown.
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
# run the script to generate HDF5 model files based on community models
|
||||
# 1. crust1.0 (Laske et al., 2013) + ak135 model (Kennett et al., 1995)
|
||||
python 1_crust1.0_ak135_model.py
|
||||
|
||||
|
||||
# References:
|
||||
# Laske, G., Masters, G., Ma, Z., & Pasyanos, M. (2013, April). Update on CRUST1.0 - A 1-degree global model of Earth's crust. In Geophysical Research Abstracts (Vol. 15, No. 15, p. 2658).
|
||||
# Kennett, B. L., Engdahl, E. R., & Buland, R. (1995). Constraints on seismic velocities in the Earth from traveltimes. Geophysical Journal International, 122(1), 108-124.
|
||||
287
examples/scripts_of_generate_hdf5_model/1_generate_models.py
Normal file
287
examples/scripts_of_generate_hdf5_model/1_generate_models.py
Normal file
@@ -0,0 +1,287 @@
|
||||
# %%
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.checkerboard import Checker
|
||||
import os
|
||||
import numpy as np
|
||||
import h5py
|
||||
|
||||
# %% [markdown]
|
||||
# # read YAML parameter file to obtain the grid parameters for the model
|
||||
|
||||
# %%
|
||||
output_path = "models"
|
||||
os.makedirs(output_path, exist_ok=True)
|
||||
|
||||
par_file = "input_params/input_params.yaml"
|
||||
|
||||
att_model = ATTModel(par_file)
|
||||
|
||||
n_rtp = att_model.n_rtp # grid node numbers in r (depth), t (latitude), p (longitude) directions
|
||||
am_depths = att_model.depths # depths in km
|
||||
am_latitudes = att_model.latitudes # latitudes in degrees
|
||||
am_longitudes = att_model.longitudes # longitudes in degrees
|
||||
|
||||
print("grid node numbers (N_r, N_t, N_p):", n_rtp)
|
||||
print("depths (km):", am_depths)
|
||||
print("latitudes (degree):", am_latitudes)
|
||||
print("longitudes (degree):", am_longitudes)
|
||||
|
||||
# %% [markdown]
|
||||
# # eg1. generate model with constant velocity
|
||||
|
||||
# %%
|
||||
# case 1. ---------- generate a constant velocity model using PyTomoATT module -------------
|
||||
|
||||
# set the velocity model to a constant value
|
||||
constant_v = 6.0 # constant velocity (km/s)
|
||||
att_model.vel[:,:,:] = constant_v
|
||||
att_model.xi[:,:,:] = 0.0
|
||||
att_model.eta[:,:,:] = 0.0
|
||||
|
||||
# write the model to a file
|
||||
fname = "%s/constant_velocity_N%d_%d_%d_PyTomoATT.h5"%(output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
att_model.write(fname)
|
||||
print("generate model using PyTomoATT:", fname)
|
||||
|
||||
|
||||
# case 2. ---------- generate a constant velocity model using plain loop (h5py module is required) -------------
|
||||
|
||||
# set the velocity model to a constant value
|
||||
vel = np.zeros(n_rtp)
|
||||
xi = np.zeros(n_rtp)
|
||||
eta = np.zeros(n_rtp)
|
||||
for ir in range(n_rtp[0]):
|
||||
for it in range(n_rtp[1]):
|
||||
for ip in range(n_rtp[2]):
|
||||
vel[ir, it, ip] = constant_v
|
||||
|
||||
fname = "%s/constant_velocity_N%d_%d_%d_loop.h5"%(output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
|
||||
with h5py.File(fname, 'w') as f:
|
||||
f.create_dataset('vel', data=vel)
|
||||
f.create_dataset('xi', data=xi)
|
||||
f.create_dataset('eta', data=eta)
|
||||
print("generate model using plain loop:", fname)
|
||||
|
||||
# %% [markdown]
|
||||
# # eg2. generate a linear velocity model:
|
||||
# vel = 5.0, if depth < 0 km
|
||||
#
|
||||
# vel = 5.0 + 0.1 * depth, if 0 km <= depth <= 30 km
|
||||
#
|
||||
# vel = 8.0, if depth > 30 km
|
||||
|
||||
# %%
|
||||
# case 1. ---------- generate a linear velocity model using PyTomoATT module -------------
|
||||
|
||||
# set the velocity to increase linearly with depth between 0 and 30 km
|
||||
idx = np.where((am_depths >= 0.0) & (am_depths <= 30.0))
|
||||
depth = am_depths[idx]
|
||||
att_model.vel[idx,:,:] = 5.0 + 0.1 * depth[:, np.newaxis, np.newaxis] # velocity increases linearly from 5.0 to 8.0 km/s
|
||||
att_model.vel[np.where(am_depths > 30.0),:,:] = 8.0 # velocity is constant at 8.0 km/s below 30.0 km depth
|
||||
att_model.vel[np.where(am_depths < 0.0),:,:] = 5.0 # velocity is constant at 5.0 km/s above 0.0 km depth
|
||||
|
||||
att_model.xi[:,:,:] = 0.0
|
||||
att_model.eta[:,:,:] = 0.0
|
||||
|
||||
# write the model to a file
|
||||
fname = "%s/linear_velocity_N%d_%d_%d_PyTomoATT.h5"%(output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
att_model.write(fname)
|
||||
print("generate model using PyTomoATT:", fname)
|
||||
|
||||
# case 2. ---------- generate a linear velocity model using plain loop (h5py module is required) -------------
|
||||
|
||||
# set the velocity model to a linear value
|
||||
vel = np.zeros(n_rtp)
|
||||
xi = np.zeros(n_rtp)
|
||||
eta = np.zeros(n_rtp)
|
||||
|
||||
for ir in range(n_rtp[0]):
|
||||
for it in range(n_rtp[1]):
|
||||
for ip in range(n_rtp[2]):
|
||||
if am_depths[ir] < 0.0:
|
||||
vel[ir, it, ip] = 5.0
|
||||
elif am_depths[ir] <= 30.0:
|
||||
vel[ir, it, ip] = 5.0 + 0.1 * am_depths[ir]
|
||||
else:
|
||||
vel[ir, it, ip] = 8.0
|
||||
fname = "%s/linear_velocity_N%d_%d_%d_loop.h5"%(output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
|
||||
with h5py.File(fname, 'w') as f:
|
||||
f.create_dataset('vel', data=vel)
|
||||
f.create_dataset('xi', data=xi)
|
||||
f.create_dataset('eta', data=eta)
|
||||
print("generate model using plain loop:", fname)
|
||||
|
||||
# %% [markdown]
|
||||
# # eg3. generate checkerboard model for velocity and anisotropy.
|
||||
#
|
||||
# assign perturbation
|
||||
|
||||
# %%
|
||||
# case 1. ---------- generate a checkerboard model using PyTomoATT module -------------
|
||||
|
||||
# file name of the background model
|
||||
bg_model_fname = "%s/linear_velocity_N%d_%d_%d_PyTomoATT.h5" % (output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
|
||||
lim_x = [0.5, 1.5] # longitude limits of the checkerboard
|
||||
lim_y = [0.25, 0.75] # latitude limits of the checkerboard
|
||||
lim_z = [0, 30] # depth limits of the checkerboard
|
||||
pert_vel = 0.1 # amplitude of velocity perturbation (fraction, 0.1 = 10%)
|
||||
pert_ani = 0.05 # amplitude of anisotropy perturbation (fraction)
|
||||
ani_dir = 60.0 # fast velocity direction (anti-clockwise from x-axis, in degrees)
|
||||
n_pert_x = 4 # number of checkers in x (lon) direction
|
||||
n_pert_y = 2 # number of checkers in y (lat) direction
|
||||
n_pert_z = 3 # number of checkers in z (dep) direction
|
||||
|
||||
size_x = (lim_x[1] - lim_x[0]) / n_pert_x # size of each checker in x direction
|
||||
size_y = (lim_y[1] - lim_y[0]) / n_pert_y # size of each checker in y direction
|
||||
size_z = (lim_z[1] - lim_z[0]) / n_pert_z # size of each checker in z direction
|
||||
|
||||
|
||||
ckb = Checker(bg_model_fname, para_fname=par_file)
|
||||
# n_pert_x, n_pert_y, n_pert_z: number of checkers in x (lon), y (lat), z (dep) directions
|
||||
# pert_vel: amplitude of velocity perturbation (fraction)
|
||||
# pert_ani: amplitude of anisotropy perturbation (fraction)
|
||||
# ani_dir: fast velocity direction (anti-clockwise from x-axis, in degrees)
|
||||
# lim_x, lim_y, lim_z: limits of the checkerboard in x (lon), y (lat), z (dep) directions
|
||||
ckb.checkerboard(
|
||||
n_pert_x=n_pert_x, n_pert_y=n_pert_y, n_pert_z=n_pert_z,
|
||||
pert_vel=pert_vel, pert_ani=pert_ani, ani_dir=ani_dir,
|
||||
lim_x=lim_x, lim_y=lim_y, lim_z=lim_z
|
||||
)
|
||||
|
||||
fname = "%s/linear_velocity_ckb_N%d_%d_%d_PyTomoATT.h5" % (output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
ckb.write(fname)
|
||||
print("generate checkerboard model based on the linear velocity model using PyTomoATT:", fname)
|
||||
|
||||
# case 2. ---------- generate a checkerboard model using plain loop (h5py module is required) -------------
|
||||
|
||||
# read the background model
|
||||
bg_model = np.zeros(n_rtp)
|
||||
with h5py.File(bg_model_fname, 'r') as f:
|
||||
bg_model = f['vel'][:]
|
||||
|
||||
# set the checkerboard model
|
||||
vel = np.zeros(n_rtp)
|
||||
xi = np.zeros(n_rtp)
|
||||
eta = np.zeros(n_rtp)
|
||||
|
||||
for ir in range(n_rtp[0]):
|
||||
for it in range(n_rtp[1]):
|
||||
for ip in range(n_rtp[2]):
|
||||
depth = am_depths[ir]
|
||||
lat = am_latitudes[it]
|
||||
lon = am_longitudes[ip]
|
||||
|
||||
# check if the current grid node is within the checkerboard limits
|
||||
if (lim_x[0] <= lon <= lim_x[1]) and (lim_y[0] <= lat <= lim_y[1]) and (lim_z[0] <= depth <= lim_z[1]):
|
||||
|
||||
sigma_vel = np.sin(np.pi * (lon - lim_x[0])/size_x) * np.sin(np.pi * (lat - lim_y[0])/size_y) * np.sin(np.pi * (depth - lim_z[0])/size_z)
|
||||
sigma_ani = np.sin(np.pi * (lon - lim_x[0])/size_x) * np.sin(np.pi * (lat - lim_y[0])/size_y) * np.sin(np.pi * (depth - lim_z[0])/size_z)
|
||||
|
||||
if (sigma_ani > 0):
|
||||
psi = ani_dir / 180.0 * np.pi # convert degrees to radians
|
||||
elif (sigma_ani < 0):
|
||||
psi = (ani_dir + 90.0) / 180.0 * np.pi
|
||||
else:
|
||||
psi = 0.0
|
||||
|
||||
else:
|
||||
sigma_vel = 0.0
|
||||
sigma_ani = 0.0
|
||||
psi = 0.0
|
||||
|
||||
# set the velocity and anisotropy
|
||||
vel[ir, it, ip] = bg_model[ir, it, ip] * (1.0 + pert_vel * sigma_vel)
|
||||
xi[ir, it, ip] = pert_ani * abs(sigma_ani) * np.cos(2*psi)
|
||||
eta[ir, it, ip] = pert_ani * abs(sigma_ani) * np.sin(2*psi)
|
||||
|
||||
# write the model to a file
|
||||
fname = "%s/linear_velocity_ckb_N%d_%d_%d_loop.h5" % (output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
with h5py.File(fname, 'w') as f:
|
||||
f.create_dataset('vel', data=vel)
|
||||
f.create_dataset('xi', data=xi)
|
||||
f.create_dataset('eta', data=eta)
|
||||
print("generate checkerboard model based on the linear velocity model using plain loop:", fname)
|
||||
|
||||
|
||||
# %% [markdown]
|
||||
# # eg4. generate flexible checkerboard model
|
||||
#
|
||||
# the checker size increases with depth;
|
||||
#
|
||||
# the checker size is larger for anisotropy;
|
||||
|
||||
# %%
|
||||
# only the plain-loop version (h5py) is provided for this example
|
||||
|
||||
# file name of the background model
|
||||
bg_model_fname = "%s/linear_velocity_N%d_%d_%d_PyTomoATT.h5" % (output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
|
||||
# read the background model
|
||||
bg_model = np.zeros(n_rtp)
|
||||
with h5py.File(bg_model_fname, 'r') as f:
|
||||
bg_model = f['vel'][:]
|
||||
|
||||
# set the checkerboard model
|
||||
vel = np.zeros(n_rtp)
|
||||
xi = np.zeros(n_rtp)
|
||||
eta = np.zeros(n_rtp)
|
||||
|
||||
for ir in range(n_rtp[0]):
|
||||
for it in range(n_rtp[1]):
|
||||
for ip in range(n_rtp[2]):
|
||||
depth = am_depths[ir]
|
||||
lat = am_latitudes[it]
|
||||
lon = am_longitudes[ip]
|
||||
|
||||
if ((depth >= 0.0) and (depth <= 8.0)):
|
||||
size_vel = 0.2
|
||||
size_ani = 0.3
|
||||
|
||||
sigma_vel = np.sin(np.pi * lon/size_vel) * np.sin(np.pi * lat/size_vel) * np.sin(np.pi * depth/8.0)
|
||||
sigma_ani = np.sin(np.pi * lon/size_ani) * np.sin(np.pi * lat/size_ani) * np.sin(np.pi * depth/8.0)
|
||||
|
||||
elif ((depth > 8.0) and (depth <= 20.0)):
|
||||
|
||||
size_vel = 0.3
|
||||
size_ani = 0.4
|
||||
|
||||
sigma_vel = np.sin(np.pi * lon/size_vel) * np.sin(np.pi * lat/size_vel) * np.sin(np.pi * (depth - 8.0)/12.0 + np.pi)
|
||||
sigma_ani = np.sin(np.pi * lon/size_ani) * np.sin(np.pi * lat/size_ani) * np.sin(np.pi * (depth - 8.0)/12.0 + np.pi)
|
||||
|
||||
elif ((depth > 20.0) and (depth <= 36.0)):
|
||||
|
||||
size_vel = 0.4
|
||||
size_ani = 0.5
|
||||
|
||||
sigma_vel = np.sin(np.pi * lon/size_vel) * np.sin(np.pi * lat/size_vel) * np.sin(np.pi * (depth - 20.0)/16.0 + 2*np.pi)
|
||||
sigma_ani = np.sin(np.pi * lon/size_ani) * np.sin(np.pi * lat/size_ani) * np.sin(np.pi * (depth - 20.0)/16.0 + 2*np.pi)
|
||||
|
||||
else:
|
||||
sigma_vel = 0.0
|
||||
sigma_ani = 0.0
|
||||
|
||||
if (sigma_ani > 0):
|
||||
psi = ani_dir / 180.0 * np.pi # convert degrees to radians
|
||||
elif (sigma_ani < 0):
|
||||
psi = (ani_dir + 90.0) / 180.0 * np.pi
|
||||
else:
|
||||
psi = 0.0
|
||||
|
||||
# set the velocity and anisotropy
|
||||
vel[ir, it, ip] = bg_model[ir, it, ip] * (1.0 + pert_vel * sigma_vel)
|
||||
xi[ir, it, ip] = pert_ani * abs(sigma_ani) * np.cos(2*psi)
|
||||
eta[ir, it, ip] = pert_ani * abs(sigma_ani) * np.sin(2*psi)
|
||||
|
||||
# write the model to a file
|
||||
fname = "%s/linear_velocity_ckb_flex_N%d_%d_%d.h5" % (output_path, n_rtp[0], n_rtp[1], n_rtp[2])
|
||||
with h5py.File(fname, 'w') as f:
|
||||
f.create_dataset('vel', data=vel)
|
||||
f.create_dataset('xi', data=xi)
|
||||
f.create_dataset('eta', data=eta)
|
||||
|
||||
print("generate flexible checkerboard model based on the linear velocity model using plain loop:", fname)
|
||||
|
||||
|
||||
338
examples/scripts_of_generate_hdf5_model/2_plot_models.py
Normal file
338
examples/scripts_of_generate_hdf5_model/2_plot_models.py
Normal file
@@ -0,0 +1,338 @@
|
||||
# %%
|
||||
import pygmt
|
||||
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")
|
||||
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.data import ATTData
|
||||
import numpy as np
|
||||
|
||||
|
||||
# %%
|
||||
import os
|
||||
output_path = "figs"
|
||||
os.makedirs(output_path, exist_ok=True)
|
||||
|
||||
|
||||
# %% [markdown]
|
||||
# # eg1. plot constant velocity model
|
||||
|
||||
# %%
|
||||
# ---------------- read model files ----------------
|
||||
# file names
|
||||
# init_model_file = 'models/constant_velocity_N61_51_101_PyTomoATT.h5' # initial model file
|
||||
init_model_file = 'models/constant_velocity_N61_51_101_loop.h5' # initial model file
|
||||
par_file = 'input_params/input_params.yaml' # parameter file
|
||||
|
||||
# read initial and final model file
|
||||
att_model = ATTModel.read(init_model_file, par_file)
|
||||
init_model = att_model.to_xarray()
|
||||
|
||||
# interp vel at depth = 20 km
|
||||
depth = 20.0
|
||||
vel_init = init_model.interp_dep(depth, field='vel') # vel_init[i,:] are (lon, lat, vel)
|
||||
|
||||
# ----------------- pygmt plot ------------------
|
||||
|
||||
fig = pygmt.Figure()
|
||||
pygmt.makecpt(cmap="seis", series=[5, 7], background=True, reverse=False) # colorbar
|
||||
|
||||
|
||||
# ------------ plot horizontal profile of velocity ------------
|
||||
region = [0, 2, 0, 1] # region of interest
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tVelocity (km/s)"], projection="M10c") # base map
|
||||
|
||||
depth = 20.0
|
||||
prof_init = init_model.interp_dep(depth, field='vel') # prof_init[i,:] are (lon, lat, vel)
|
||||
lon = prof_init[:,0] # longitude
|
||||
lat = prof_init[:,1] # latitude
|
||||
vel = prof_init[:,2] # velocity
|
||||
|
||||
grid = pygmt.surface(x=lon, y=lat, z=vel, spacing=0.04,region=region)
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a1","y+lVp (km/s)"], position="+e+w4c/0.3c+h")
|
||||
fig.shift_origin(xshift=0, yshift= 1.5)
|
||||
|
||||
# ------------ plot vertical profile of velocity ------------
|
||||
fig.shift_origin(xshift=11, yshift= 0)
|
||||
|
||||
region = [0, 40, 0, 1] # region of interest
|
||||
fig.basemap(region=region, frame=["xa20+lDepth (km)","ya1+lLatitude","nSwE"], projection="X3c/5c") # base map
|
||||
|
||||
start = [1,0]; end = [1,1]; gap = 1
|
||||
prof_init = init_model.interp_sec(start, end, field='vel', val = gap) # prof_init[i,:] are (lon, lat, dis, dep, vel)
|
||||
lat = prof_init[:,1] # lat
|
||||
dep = prof_init[:,3] # depth
|
||||
vel = prof_init[:,4] # velocity
|
||||
|
||||
grid = pygmt.surface(x=dep, y=lat, z=vel, spacing="1/0.04",region=region)
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
|
||||
fig.savefig("figs/constant_velocity.png") # save figure
|
||||
fig.show()
|
||||
|
||||
|
||||
|
||||
# %% [markdown]
|
||||
# # eg2. plot linear velocity model
|
||||
|
||||
# %%
|
||||
# ---------------- read model files ----------------
|
||||
# file names
|
||||
# init_model_file = 'models/linear_velocity_N61_51_101_PyTomoATT.h5' # initial model file
|
||||
init_model_file = 'models/linear_velocity_N61_51_101_loop.h5' # initial model file
|
||||
par_file = 'input_params/input_params.yaml' # parameter file
|
||||
|
||||
# read initial and final model file
|
||||
att_model = ATTModel.read(init_model_file, par_file)
|
||||
init_model = att_model.to_xarray()
|
||||
|
||||
# # interp vel at depth = 20 km
|
||||
# depth = 20.0
|
||||
# vel_init = init_model.interp_dep(depth, field='vel') # vel_init[i,:] are (lon, lat, vel)
|
||||
|
||||
# ----------------- pygmt plot ------------------
|
||||
|
||||
fig = pygmt.Figure()
|
||||
pygmt.makecpt(cmap="seis", series=[5, 8], background=True, reverse=False) # colorbar
|
||||
|
||||
|
||||
# ------------ plot horizontal profile of velocity ------------
|
||||
region = [0, 2, 0, 1] # region of interest
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tVelocity (km/s)"], projection="M10c") # base map
|
||||
|
||||
depth = 20.0
|
||||
prof_init = init_model.interp_dep(depth, field='vel') # prof_init[i,:] are (lon, lat, vel)
|
||||
lon = prof_init[:,0] # longitude
|
||||
lat = prof_init[:,1] # latitude
|
||||
vel = prof_init[:,2] # velocity
|
||||
|
||||
grid = pygmt.surface(x=lon, y=lat, z=vel, spacing=0.04,region=region)
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a1","y+lVp (km/s)"], position="+e+w4c/0.3c+h")
|
||||
fig.shift_origin(xshift=0, yshift= 1.5)
|
||||
|
||||
# ------------ plot vertical profile of velocity ------------
|
||||
fig.shift_origin(xshift=11, yshift= 0)
|
||||
|
||||
region = [0, 40, 0, 1] # region of interest
|
||||
fig.basemap(region=region, frame=["xa20+lDepth (km)","ya1+lLatitude","nSwE"], projection="X3c/5c") # base map
|
||||
|
||||
start = [1,0]; end = [1,1]; gap = 1
|
||||
prof_init = init_model.interp_sec(start, end, field='vel', val = gap) # prof_init[i,:] are (lon, lat, dis, dep, vel)
|
||||
lat = prof_init[:,1] # lat
|
||||
dep = prof_init[:,3] # depth
|
||||
vel = prof_init[:,4] # velocity
|
||||
|
||||
grid = pygmt.surface(x=dep, y=lat, z=vel, spacing="1/0.04",region=region)
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
|
||||
fig.savefig("figs/linear_velocity.png") # save figure
|
||||
fig.show()
|
||||
|
||||
|
||||
|
||||
# %% [markdown]
|
||||
# # eg3. plot checkerboard model
|
||||
|
||||
# %%
|
||||
# ---------------- read model files ----------------
|
||||
# file names
|
||||
init_model_file = 'models/linear_velocity_N61_51_101_PyTomoATT.h5' # initial model file
|
||||
ckb_model_file = 'models/linear_velocity_ckb_N61_51_101_PyTomoATT.h5' # checkerboard model file
|
||||
# ckb_model_file = 'models/linear_velocity_ckb_N61_51_101_loop.h5' # checkerboard model file
|
||||
par_file = 'input_params/input_params.yaml' # parameter file
|
||||
|
||||
# read initial and final model file
|
||||
att_model = ATTModel.read(init_model_file, par_file)
|
||||
init_model = att_model.to_xarray()
|
||||
|
||||
att_model = ATTModel.read(ckb_model_file, par_file)
|
||||
ckb_model = att_model.to_xarray()
|
||||
|
||||
# # interp vel at depth = 20 km
|
||||
# depth = 20.0
|
||||
# vel_init = init_model.interp_dep(depth, field='vel') # vel_init[i,:] are (lon, lat, vel)
|
||||
# vel_ckb = ckb_model.interp_dep(depth, field='vel') # vel_ckb[i,:] are (lon, lat, vel)
|
||||
|
||||
# ----------------- pygmt plot ------------------
|
||||
|
||||
fig = pygmt.Figure()
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-10,10], background=True, reverse=False) # colorbar
|
||||
|
||||
|
||||
# ------------ plot horizontal profile of velocity ------------
|
||||
region = [0, 2, 0, 1] # region of interest
|
||||
fig.basemap(region=region, frame=["xa1","ya1","+tVelocity perturbation (%)"], projection="M10c") # base map
|
||||
|
||||
# velocity perturbation at depth = 15 km
|
||||
depth = 15.0
|
||||
prof_init = init_model.interp_dep(depth, field='vel') # prof_init[i,:] are (lon, lat, vel)
|
||||
prof_ckb = ckb_model.interp_dep(depth, field='vel') # prof_ckb[i,:] are (lon, lat, vel)
|
||||
lon = prof_init[:,0] # longitude
|
||||
lat = prof_init[:,1] # latitude
|
||||
vel_pert = (prof_ckb[:,2] - prof_init[:,2])/prof_init[:,2] * 100 # velocity perturbation related to initial model
|
||||
|
||||
grid = pygmt.surface(x=lon, y=lat, z=vel_pert, spacing=0.01,region=region)
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# fast velocity directions (FVDs)
|
||||
samp_interval = 3 # control the density of anisotropic arrows
|
||||
width = 0.06 # width of the anisotropic arrow
|
||||
ani_per_1 = 0.01; ani_per_2 = 0.05; scale = 0.5; basic = 0.1 # control the length of anisotropic arrows related to the amplitude of anisotropy. length = 0.1 + (amplitude - ani_per_1) / (ani_per_2 - ani_per_1) * scale
|
||||
ani_thd = ani_per_1 # if the amplitude of anisotropy is smaller than ani_thd, no anisotropic arrow will be plotted
|
||||
|
||||
phi = ckb_model.interp_dep(depth, field='phi', samp_interval=samp_interval) # phi_inv[i,:] are (lon, lat, phi)
|
||||
epsilon = ckb_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval) # epsilon_inv[i,:] are (lon, lat, epsilon)
|
||||
ani_lon = phi[:,0].reshape(-1,1)
|
||||
ani_lat = phi[:,1].reshape(-1,1)
|
||||
ani_phi = phi[:,2].reshape(-1,1)
|
||||
length = ((epsilon[:,2] - ani_per_1) / (ani_per_2 - ani_per_1) * scale + basic).reshape(-1,1)
|
||||
ani_arrow = np.hstack([ani_lon, ani_lat, ani_phi, length, np.ones((ani_lon.size,1))*width]) # lon, lat, angle [-90, 90], length, width
|
||||
|
||||
# remove arrows with small amplitude of anisotropy
|
||||
idx = np.where(epsilon[:,2] > ani_thd)[0] # indices of arrows with large enough amplitude of anisotropy
|
||||
ani_arrow = ani_arrow[idx,:] # remove arrows with small amplitude of anisotropy
|
||||
|
||||
# plot anisotropic arrows
|
||||
fig.plot(ani_arrow, style='j', fill='yellow1', pen='0.5p,black') # plot fast velocity direction
|
||||
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a10","y+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
fig.shift_origin(xshift=0, yshift= 1.5)
|
||||
|
||||
# ------------ plot vertical profile of velocity ------------
|
||||
fig.shift_origin(xshift=11, yshift= 0)
|
||||
|
||||
region = [0, 40, 0, 1] # region of interest
|
||||
fig.basemap(region=region, frame=["xa20+lDepth (km)","ya1+lLatitude","nSwE"], projection="X3c/5c") # base map
|
||||
|
||||
start = [0.875,0]; end = [0.875,1]; gap = 1
|
||||
prof_init = init_model.interp_sec(start, end, field='vel', val = gap) # prof_init[i,:] are (lon, lat, dis, dep, vel)
|
||||
prof_ckb = ckb_model.interp_sec(start, end, field='vel', val = gap) # prof_ckb[i,:] are (lon, lat, dis, dep, vel)
|
||||
lat = prof_init[:,1] # lat
|
||||
dep = prof_init[:,3] # depth
|
||||
vel = (prof_ckb[:,4] - prof_init[:,4])/prof_init[:,4] * 100 # velocity perturbation related to initial model
|
||||
|
||||
grid = pygmt.surface(x=dep, y=lat, z=vel, spacing="1/0.01",region=region)
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
|
||||
fig.savefig("figs/checkerboard_velocity.png") # save figure
|
||||
fig.show()
|
||||
|
||||
|
||||
# %% [markdown]
|
||||
# # eg4. plot flexible checkerboard model
|
||||
|
||||
# %%
|
||||
# ---------------- read model files ----------------
|
||||
# file names
|
||||
init_model_file = 'models/linear_velocity_N61_51_101_PyTomoATT.h5' # initial model file
|
||||
ckb_model_file = 'models/linear_velocity_ckb_flex_N61_51_101.h5' # checkerboard model file
|
||||
par_file = 'input_params/input_params.yaml' # parameter file
|
||||
|
||||
# read initial and final model file
|
||||
att_model = ATTModel.read(init_model_file, par_file)
|
||||
init_model = att_model.to_xarray()
|
||||
|
||||
att_model = ATTModel.read(ckb_model_file, par_file)
|
||||
ckb_model = att_model.to_xarray()
|
||||
|
||||
# # interp vel at depth = 20 km
|
||||
# depth = 20.0
|
||||
# vel_init = init_model.interp_dep(depth, field='vel') # vel_init[i,:] are (lon, lat, vel)
|
||||
# vel_ckb = ckb_model.interp_dep(depth, field='vel') # vel_ckb[i,:] are (lon, lat, vel)
|
||||
|
||||
# ----------------- pygmt plot ------------------
|
||||
|
||||
fig = pygmt.Figure()
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-10,10], background=True, reverse=False) # colorbar
|
||||
|
||||
|
||||
for depth in [4,14,28]:
|
||||
# ------------ plot horizontal profile of velocity ------------
|
||||
region = [0, 2, 0, 1] # region of interest
|
||||
fig.basemap(region=region, frame=["xa1","ya1","NsEw"], projection="M10c") # base map
|
||||
|
||||
# velocity perturbation at the current depth
|
||||
prof_init = init_model.interp_dep(depth, field='vel') # prof_init[i,:] are (lon, lat, vel)
|
||||
prof_ckb = ckb_model.interp_dep(depth, field='vel') # prof_ckb[i,:] are (lon, lat, vel)
|
||||
lon = prof_init[:,0] # longitude
|
||||
lat = prof_init[:,1] # latitude
|
||||
vel_pert = (prof_ckb[:,2] - prof_init[:,2])/prof_init[:,2] * 100 # velocity perturbation related to initial model
|
||||
|
||||
grid = pygmt.surface(x=lon, y=lat, z=vel_pert, spacing=0.01,region=region)
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
|
||||
# fast velocity directions (FVDs)
|
||||
samp_interval = 3 # control the density of anisotropic arrows
|
||||
width = 0.06 # width of the anisotropic arrow
|
||||
ani_per_1 = 0.01; ani_per_2 = 0.05; scale = 0.5; basic = 0.1 # control the length of anisotropic arrows related to the amplitude of anisotropy. length = 0.1 + (amplitude - ani_per_1) / (ani_per_2 - ani_per_1) * scale
|
||||
ani_thd = ani_per_1 # if the amplitude of anisotropy is smaller than ani_thd, no anisotropic arrow will be plotted
|
||||
|
||||
phi = ckb_model.interp_dep(depth, field='phi', samp_interval=samp_interval) # phi_inv[i,:] are (lon, lat, phi)
|
||||
epsilon = ckb_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval) # epsilon_inv[i,:] are (lon, lat, epsilon)
|
||||
ani_lon = phi[:,0].reshape(-1,1)
|
||||
ani_lat = phi[:,1].reshape(-1,1)
|
||||
ani_phi = phi[:,2].reshape(-1,1)
|
||||
length = ((epsilon[:,2] - ani_per_1) / (ani_per_2 - ani_per_1) * scale + basic).reshape(-1,1)
|
||||
ani_arrow = np.hstack([ani_lon, ani_lat, ani_phi, length, np.ones((ani_lon.size,1))*width]) # lon, lat, angle [-90, 90], length, width
|
||||
|
||||
# remove arrows with small amplitude of anisotropy
|
||||
idx = np.where(epsilon[:,2] > ani_thd)[0] # indices of arrows with large enough amplitude of anisotropy
|
||||
ani_arrow = ani_arrow[idx,:] # remove arrows with small amplitude of anisotropy
|
||||
|
||||
# plot anisotropic arrows
|
||||
fig.plot(ani_arrow, style='j', fill='yellow1', pen='0.5p,black') # plot fast velocity direction
|
||||
|
||||
fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# plot vertical profile
|
||||
fig.plot(x=[0.9, 0.9], y=[0, 1], pen="2p,black,-") # vertical line
|
||||
|
||||
fig.shift_origin(xshift=0, yshift=-6)
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift= 4.5)
|
||||
fig.colorbar(frame = ["a10","y+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
fig.shift_origin(xshift=0, yshift= 1.5)
|
||||
|
||||
# ------------ plot vertical profile of velocity ------------
|
||||
fig.shift_origin(xshift=11, yshift= 0)
|
||||
|
||||
region = [0, 40, 0, 1] # region of interest
|
||||
fig.basemap(region=region, frame=["xa20+lDepth (km)","ya1+lLatitude","nSwE"], projection="X3c/5c") # base map
|
||||
|
||||
start = [0.9,0]; end = [0.9,1]; gap = 1
|
||||
prof_init = init_model.interp_sec(start, end, field='vel', val = gap) # prof_init[i,:] are (lon, lat, dis, dep, vel)
|
||||
prof_ckb = ckb_model.interp_sec(start, end, field='vel', val = gap) # prof_ckb[i,:] are (lon, lat, dis, dep, vel)
|
||||
lat = prof_init[:,1] # lat
|
||||
dep = prof_init[:,3] # depth
|
||||
vel = (prof_ckb[:,4] - prof_init[:,4])/prof_init[:,4] * 100 # velocity perturbation related to initial model
|
||||
|
||||
grid = pygmt.surface(x=dep, y=lat, z=vel, spacing="1/0.01",region=region)
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
|
||||
fig.savefig("figs/flexible_checkerboard_velocity.png") # save figure
|
||||
fig.show()
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,50 @@
|
||||
version: 3
|
||||
|
||||
#################################################
|
||||
# computational domain #
|
||||
#################################################
|
||||
domain:
|
||||
min_max_dep: [-10, 50] # depth in km
|
||||
min_max_lat: [0, 1] # latitude in degree
|
||||
min_max_lon: [0, 2] # longitude in degree
|
||||
n_rtp: [61, 51, 101] # number of nodes in depth, latitude, longitude directions
|
||||
|
||||
#################################################
|
||||
# traveltime data file path #
|
||||
#################################################
|
||||
source:
|
||||
src_rec_file: 1_src_rec_files/src_rec_config.dat # source receiver file path
|
||||
swap_src_rec: true # swap source and receiver
|
||||
|
||||
#################################################
|
||||
# initial model file path #
|
||||
#################################################
|
||||
model:
|
||||
init_model_path: 2_models/model_ckb_N61_51_101.h5 # path to initial model file
|
||||
|
||||
#################################################
|
||||
# parallel computation settings #
|
||||
#################################################
|
||||
parallel: # parameters for parallel computation
|
||||
n_sims: 8 # number of simultaneous runs (parallelize over the sources)
|
||||
ndiv_rtp: [1, 1, 1] # number of subdivisions in each direction (parallelize the computational domain)
|
||||
|
||||
############################################
|
||||
# output file setting #
|
||||
############################################
|
||||
output_setting:
|
||||
output_dir: OUTPUT_FILES/OUTPUT_FILES_signal # path to output directory (default is ./OUTPUT_FILES/)
|
||||
output_final_model: true # output merged final model (final_model.h5) or not.
|
||||
output_in_process: false # output model at each inv iteration or not.
|
||||
output_in_process_data: false # output src_rec_file at each inv iteration or not.
|
||||
output_file_format: 0
|
||||
|
||||
#################################################
|
||||
# inversion or forward modeling #
|
||||
#################################################
|
||||
# run mode
|
||||
# 0 for forward simulation only,
|
||||
# 1 for inversion
|
||||
# 2 for earthquake relocation
|
||||
# 3 for inversion + earthquake relocation
|
||||
run_mode: 0
|
||||
13
examples/scripts_of_generate_hdf5_model/run_this_example.sh
Normal file
13
examples/scripts_of_generate_hdf5_model/run_this_example.sh
Normal file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
|
||||
# run the script to generate HDF5 model files for TomoATT. Four models will be generated for TomoATT
|
||||
# 1. constant velocity model
|
||||
# 2. linear velocity model
|
||||
# 3. regular checkerboard model based on the linear velocity model
|
||||
# 4. flexible checkerboard model based on the linear velocity model
|
||||
|
||||
python 1_generate_models.py
|
||||
|
||||
# run the script to plot the generated models (optional)
|
||||
|
||||
python 2_plot_models.py
|
||||
173
examples/scripts_of_plotting/1_plot_model.py
Normal file
173
examples/scripts_of_plotting/1_plot_model.py
Normal file
@@ -0,0 +1,173 @@
|
||||
# %%
|
||||
import pygmt
|
||||
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")
|
||||
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.data import ATTData
|
||||
import numpy as np
|
||||
|
||||
# %%
|
||||
# (Option 1) plot the final model after inversion
|
||||
|
||||
# Need to set "output_final_model" as "True" in the input_params.yaml file
|
||||
|
||||
# ---------------- read model files ----------------
|
||||
# file names
|
||||
init_model_file = 'input_files/model_init_N61_61_61.h5' # initial model file
|
||||
inv_model_file = 'OUTPUT_FILES/final_model.h5' # final model file
|
||||
par_file = 'input_files/input_params.yaml' # parameter file
|
||||
|
||||
# read initial and final model file
|
||||
model = ATTModel.read(init_model_file, par_file)
|
||||
init_model = model.to_xarray()
|
||||
|
||||
model = ATTModel.read(inv_model_file, par_file)
|
||||
inv_model = model.to_xarray()
|
||||
|
||||
|
||||
# %%
|
||||
# # (Option 2) plot the model at the XX iteration
|
||||
|
||||
# # Need to set "output_middle_model" as "True" in the input_params.yaml file
|
||||
|
||||
# # ---------------- read model files ----------------
|
||||
# # file names
|
||||
# init_model_file = 'input_files/model_init_N61_61_61.h5' # initial model file
|
||||
# inv_model_file = 'OUTPUT_FILES/middle_model_step_0007.h5' # final model file
|
||||
# par_file = 'input_files/input_params.yaml' # parameter file
|
||||
|
||||
# # read initial and final model file
|
||||
# model = ATTModel.read(init_model_file, par_file)
|
||||
# init_model = model.to_xarray()
|
||||
|
||||
# model = ATTModel.read(inv_model_file, par_file)
|
||||
# inv_model = model.to_xarray()
|
||||
|
||||
# %%
|
||||
import os
|
||||
os.makedirs('img', exist_ok=True)  # create the output image directory if it does not already exist
|
||||
|
||||
# %%
|
||||
# ---------------- access 3D model parameters ----------------
|
||||
|
||||
# we can access 3D dataset with keys:
|
||||
# 1. "vel" for velocity
|
||||
# 2. "phi" fast velocity direction, anti-clock angle w.r.t the east direction. (only available for anisotropic model)
|
||||
# 3. "epsilon" for anisotropic magnitude (only available for anisotropic model)
|
||||
# 4. "xi" and "eta" for anisotropic parameters: xi = epsilon * cos(phi), eta = epsilon * sin(phi)
|
||||
vel_3d_array = inv_model["vel"]
|
||||
phi_3d_array = inv_model["phi"]
|
||||
epsilon_3d_array = inv_model["epsilon"]
|
||||
xi_3d_array = inv_model["xi"]
|
||||
eta_3d_array = inv_model["eta"]
|
||||
|
||||
print("3D array of model parameters. \n vel: ", vel_3d_array.shape, " \n phi: ", phi_3d_array.shape,
|
||||
" \n epsilon: ", epsilon_3d_array.shape, " \n xi: ", xi_3d_array.shape, " \n eta: ", eta_3d_array.shape)
|
||||
|
||||
# %%
|
||||
# ---------------- 2D depth profile of velocity perturbation ----------------
|
||||
|
||||
# interp vel at depth = 20 km
|
||||
depth = 20.0
|
||||
vel_init = init_model.interp_dep(depth, field='vel') # vel_init[i,:] are (lon, lat, vel)
|
||||
vel_inv = inv_model.interp_dep(depth, field='vel')
|
||||
|
||||
print("vel_depth at depth = ", depth, " km. vel_depth:", vel_init.shape, ", (lon, lat, vel)")
|
||||
|
||||
# plot
|
||||
fig = pygmt.Figure()
|
||||
fig.basemap(region=[0,2,0,2], frame=["xa1","ya1","+tVelocity perturbation"], projection="M10c") # base map
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-20, 20], background=True, reverse=False) # colorbar
|
||||
|
||||
x = vel_init[:,0]; # longitude
|
||||
y = vel_init[:,1]; # latitude
|
||||
value = (vel_inv[:,2] - vel_init[:,2])/vel_init[:,2] * 100 # velocity perturbation relative to the initial model
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=0.04,region=[0,2,0,2])
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a20","y+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.savefig("img/1_dep_vel.png")
|
||||
|
||||
# %%
|
||||
# ---------------- 2D depth profile of azimuthal anisotropy ----------------
|
||||
|
||||
# interp magnitude of anisotropy at depth = 20 km
|
||||
depth = 20.0
|
||||
epsilon_inv = inv_model.interp_dep(depth, field='epsilon') # epsilon_inv[i,:] are (lon, lat, epsilon)
|
||||
|
||||
print("epsilon_inv at depth = ", depth, " km. epsilon_inv:", epsilon_inv.shape, ", (lon, lat, epsilon)")
|
||||
|
||||
# generate fast velocity direction (anisotropic arrow)
|
||||
samp_interval = 3
|
||||
length = 10
|
||||
width = 0.1
|
||||
ani_thd = 0.02
|
||||
ani_phi = inv_model.interp_dep(depth, field='phi', samp_interval=samp_interval)
|
||||
ani_epsilon = inv_model.interp_dep(depth, field='epsilon', samp_interval=samp_interval)
|
||||
ani_arrow = np.hstack([ani_phi, ani_epsilon[:,2].reshape(-1, 1)*length, np.ones((ani_epsilon.shape[0],1))*width]) # lon, lat, angle, length, width
|
||||
idx = np.where(ani_epsilon[:,2] > ani_thd)
|
||||
ani_arrow = ani_arrow[idx[0],:]
|
||||
|
||||
print("ani_arrow at depth = ", depth, " km. ani_arrow:", ani_arrow.shape, ", (lon, lat, angle, length, width)")
|
||||
|
||||
|
||||
# plot
|
||||
fig = pygmt.Figure()
|
||||
fig.basemap(region=[0,2,0,2], frame=["xa1","ya1","+tAzimuthal Anisotropy"], projection="M10c") # base map
|
||||
pygmt.makecpt(cmap="cool", series=[0, 0.1], background=True, reverse=False) # colorbar
|
||||
|
||||
x = epsilon_inv[:,0]; # longitude
|
||||
y = epsilon_inv[:,1]; # latitude
|
||||
value = epsilon_inv[:,2] # magnitude of anisotropy
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=0.04,region=[0,2,0,2])
|
||||
|
||||
fig.grdimage(grid = grid) # plot magnitude of anisotropy
|
||||
fig.plot(ani_arrow, style='j', fill='yellow1', pen='0.5p,black') # plot fast velocity direction
|
||||
|
||||
fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a0.1","y+lAnisotropy"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.savefig("img/1_dep_ani.png")
|
||||
|
||||
# %%
|
||||
# ---------------- 2D vertical profile of velocity perturbation ----------------
|
||||
|
||||
# interp vel from [0,0.75] in lon-lat to [2,0.75] in lon-lat, gap = 1 km
|
||||
start = [0,0.75]; end = [2,0.75]; gap = 1
|
||||
vel_init_sec = init_model.interp_sec(start, end, field='vel', val = gap) # vel_init_sec[i,:] are (lon, lat, dis, dep, vel)
|
||||
vel_inv_sec = inv_model.interp_sec(start, end, field='vel', val = gap)
|
||||
|
||||
print("vel_init_sec:", vel_init_sec.shape, ", (lon, lat, distance, depth, vel)")
|
||||
|
||||
# plot
|
||||
fig = pygmt.Figure()
|
||||
fig.basemap(region=[0,2,0,40], frame=["xa1+lLongitude","ya20+lDepth (km)","+tVelocity perturbation"], projection="X10c/-4c") # base map
|
||||
pygmt.makecpt(cmap="../utils/svel13_chen.cpt", series=[-20, 20], background=True, reverse=False) # colorbar
|
||||
|
||||
x = vel_init_sec[:,0]; # longitude
|
||||
y = vel_init_sec[:,3]; # depth
|
||||
value = (vel_inv_sec[:,4] - vel_init_sec[:,4])/vel_init_sec[:,4] * 100 # velocity perturbation relative to the initial model
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="0.04/1",region=[0,2,0,40])
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.text(text="A", x = 0.1 , y = 5, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
fig.text(text="A@+'@+", x = 1.9 , y = 5, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-2)
|
||||
fig.colorbar(frame = ["a20","y+ldlnVp (%)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.savefig("img/1_sec_vel.png")
|
||||
|
||||
|
||||
187
examples/scripts_of_plotting/2_plot_time_field.py
Normal file
187
examples/scripts_of_plotting/2_plot_time_field.py
Normal file
@@ -0,0 +1,187 @@
|
||||
# %%
|
||||
import pygmt
|
||||
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")
|
||||
|
||||
from pytomoatt.model import ATTModel
|
||||
from pytomoatt.data import ATTData
|
||||
import numpy as np
|
||||
|
||||
# %%
|
||||
# plot the traveltime field and adjoint field of the source at the XX iteration
|
||||
# 1. set "output_source_field" to be "True" in the input_params.yaml file
|
||||
|
||||
# Because source parallelization is used, the field of each source is stored in only one out_data_sim_group_XX.h5 file.
|
||||
# For example, if we use 8 processors for source parallelization, we have
|
||||
# src_JC00 is stored in out_data_sim_group_0.h5 file.
|
||||
# src_JC05 is stored in out_data_sim_group_5.h5 file.
|
||||
# src_JC07 is stored in out_data_sim_group_7.h5 file.
|
||||
# src_JC08 is stored in out_data_sim_group_0.h5 file.
|
||||
# src_JC09 is stored in out_data_sim_group_1.h5 file.
|
||||
# src_JC10 is stored in out_data_sim_group_2.h5 file.
|
||||
# ...
|
||||
# src_JC24 is stored in out_data_sim_group_0.h5 file.
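# (Illustrative note, not part of the original workflow: the mapping above is consistent with a
#  round-robin assignment of sources to the n_sims simultaneous runs, i.e. the file index of the
#  i-th source is i % n_sims, e.g. source 5 with n_sims = 8 -> out_data_sim_group_5.h5,
#  and source 24 -> out_data_sim_group_0.h5.)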
# ---------------- read files ----------------
|
||||
src_name = 'JC05'
|
||||
Nstep = "0007"
|
||||
|
||||
# file names
|
||||
data_file = 'OUTPUT_FILES/out_data_sim_group_5.h5' # data file
|
||||
par_file = 'input_files/input_params.yaml' # parameter file
|
||||
grid_file = 'OUTPUT_FILES/out_data_grid.h5' # grid file
|
||||
group = 'src_%s'%(src_name) # src_${src_name}
|
||||
|
||||
# read traveltime field
|
||||
dataset_time = 'time_field_inv_%s'%(Nstep) # time_field_inv_${Nstep}
|
||||
data = ATTData.read(data_file, par_file, grid_file, group, dataset_time)
|
||||
time_field = data.to_xarray()
|
||||
|
||||
# read adjoint field
|
||||
dataset_adjoint = 'adjoint_field_inv_%s'%(Nstep) # adjoint_field_inv_${Nstep}
|
||||
data = ATTData.read(data_file, par_file, grid_file, group, dataset_adjoint)
|
||||
adjoint_field = data.to_xarray()
|
||||
|
||||
|
||||
# %%
|
||||
import os
|
||||
os.makedirs('img', exist_ok=True)  # create the output image directory if it does not already exist
|
||||
|
||||
# %%
|
||||
# ---------------- access 3D time field and adjoint field ----------------
|
||||
# we can access 3D dataset:
|
||||
|
||||
dep_1d_array = time_field["dep"]
|
||||
lat_1d_array = time_field["lat"]
|
||||
lon_1d_array = time_field["lon"]
|
||||
|
||||
print("3D array of coordinates. \n dep: ", dep_1d_array.shape, " \n lat: ", lat_1d_array.shape, " \n lon: ", lon_1d_array.shape)
|
||||
|
||||
time_3d_array = time_field[dataset_time]
|
||||
adjoint_3d_array = adjoint_field[dataset_adjoint]
|
||||
|
||||
print("3D array of fields. \n time: ", time_3d_array.shape, " \n adjoint: ", adjoint_3d_array.shape)
|
||||
|
||||
# %%
|
||||
# ---------------- 2D depth profile of time field ----------------
|
||||
|
||||
# interp the traveltime field at depth = 20 km
|
||||
depth = 20.0
|
||||
time = time_field.interp_dep(depth, field=dataset_time) # time[i,:] are (lon, lat, time)
|
||||
|
||||
print("time at depth = ", depth, " km. time:", time.shape, ", (lon, lat, time)")
|
||||
|
||||
# plot
|
||||
fig = pygmt.Figure()
|
||||
fig.basemap(region=[0,2,0,2], frame=["xa1","ya1","+tTraveltime"], projection="M10c") # base map
|
||||
pygmt.makecpt(cmap="jet", series=[0, 30], background=True, reverse=True) # colorbar
|
||||
|
||||
x = time[:,0]; # longitude
|
||||
y = time[:,1]; # latitude
|
||||
value = time[:,2] # traveltime
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=0.04,region=[0,2,0,2])
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.contour(x=x, y=y, z=value, levels=5, pen="1.5p,white") # contour
|
||||
fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a20","y+lTraveltime (s)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.savefig("img/2_dep_time.png")
|
||||
|
||||
# %%
|
||||
# ---------------- 2D depth profile of adjoint field ----------------
|
||||
|
||||
# interp the adjoint field at depth = 20 km
|
||||
depth = 20.0
|
||||
adjoint = adjoint_field.interp_dep(depth, field=dataset_adjoint) # adjoint[i,:] are (lon, lat, adjoint)

print("adjoint at depth = ", depth, " km. adjoint:", adjoint.shape, ", (lon, lat, adjoint)")
|
||||
|
||||
# plot
|
||||
fig = pygmt.Figure()
|
||||
fig.basemap(region=[0,2,0,2], frame=["xa1","ya1","+tAdjoint field"], projection="M10c") # base map
|
||||
pygmt.makecpt(cmap="jet", series=[-0.5, 0.5], background=True, reverse=False) # colorbar
|
||||
|
||||
x = time[:,0]; # longitude
|
||||
y = time[:,1]; # latitude
|
||||
value = adjoint[:,2] # adjoint value
|
||||
value = value/np.nanmax(np.abs(value))
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing=0.04,region=[0,2,0,2])
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.contour(x=x, y=y, z=time[:,2], levels=5, pen="1.5p,white") # contour
|
||||
fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-1.5)
|
||||
fig.colorbar(frame = ["a0.5","y+lAdjoint field"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.savefig("img/2_dep_adjoint.png")
|
||||
|
||||
# %%
|
||||
# ---------------- 2D vertical profile of traveltime field ----------------
|
||||
|
||||
# interp from [0,0.6] in lon-lat to [2,0.6] in lon-lat, gap = 1 km
|
||||
start = [0,0.6]; end = [2,0.6]; gap = 1
|
||||
time_sec = time_field.interp_sec(start, end, field=dataset_time, val = gap) # time_sec[i,:] are (lon, lat, dis, dep, time)
|
||||
|
||||
print("time_sec:", time_sec.shape, ", (lon, lat, distance, depth, time)")
|
||||
|
||||
# plot
|
||||
fig = pygmt.Figure()
|
||||
fig.basemap(region=[0,2,0,40], frame=["xa1+lLongitude","ya20+lDepth (km)","+tTraveltime"], projection="X10c/-2c") # base map
|
||||
pygmt.makecpt(cmap="jet", series=[0, 30], background=True, reverse=True) # colorbar
|
||||
|
||||
x = time_sec[:,0]; # longitude
|
||||
y = time_sec[:,3]; # depth
|
||||
value = time_sec[:,4] # traveltime
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="0.04/1",region=[0,2,0,40])
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.contour(x=x, y=y, z=value, levels=5, pen="1.5p,white") # contour
|
||||
fig.text(text="A", x = 0.1 , y = 5, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
fig.text(text="A@+'@+", x = 1.9 , y = 5, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-2)
|
||||
fig.colorbar(frame = ["a20","y+lTraveltime (s)"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.savefig("img/2_sec_time.png")
|
||||
|
||||
# %%
|
||||
# ---------------- 2D vertical profile of adjoint field ----------------
|
||||
|
||||
# interp from [0,0.6] in lon-lat to [2,0.6] in lon-lat, gap = 1 km
|
||||
start = [0,0.6]; end = [2,0.6]; gap = 1
|
||||
adjoint_sec = adjoint_field.interp_sec(start, end, field=dataset_adjoint, val = gap)
|
||||
|
||||
print("adjoint_sec:", time_sec.shape, ", (lon, lat, distance, depth, adjoint)")
|
||||
|
||||
# plot
|
||||
fig = pygmt.Figure()
|
||||
fig.basemap(region=[0,2,0,40], frame=["xa1+lLongitude","ya20+lDepth (km)","+tAdjoint field"], projection="X10c/-2c") # base map
|
||||
pygmt.makecpt(cmap="jet", series=[-0.5, 0.5], background=True, reverse=False) # colorbar
|
||||
|
||||
x = adjoint_sec[:,0]; # longitude
|
||||
y = adjoint_sec[:,3]; # depth
|
||||
value = adjoint_sec[:,4] # adjoint value
|
||||
value = value/np.nanmax(np.abs(value))
|
||||
grid = pygmt.surface(x=x, y=y, z=value, spacing="0.04/1",region=[0,2,0,40])
|
||||
|
||||
fig.grdimage(grid = grid) # plot figure
|
||||
fig.contour(x=x, y=y, z=time_sec[:,4], levels=5, pen="1.5p,white") # contour
|
||||
fig.text(text="A", x = 0.1 , y = 5, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
fig.text(text="A@+'@+", x = 1.9 , y = 5, font = "14p,Helvetica-Bold,black", fill = "white")
|
||||
|
||||
# colorbar
|
||||
fig.shift_origin(xshift=0, yshift=-2)
|
||||
fig.colorbar(frame = ["a0.5","y+lAdjoint"], position="+e+w4c/0.3c+h")
|
||||
|
||||
fig.savefig("img/2_sec_adjoint.png")
|
||||
|
||||
|
||||
189
examples/scripts_of_plotting/3_plot_kernel.py
Normal file
189
examples/scripts_of_plotting/3_plot_kernel.py
Normal file
@@ -0,0 +1,189 @@
# %%
import pygmt
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")

from pytomoatt.model import ATTModel
from pytomoatt.data import ATTData
import numpy as np

# %%
# plot sensitivity kernels at the XX iteration (set via Nstep below)
# 1. set "output_kernel" to be "True" in the input_params.yaml file

# ---------------- read files ----------------
Nstep = "0007"
kernel_list = {} # dictionary to store all the kernels

# file names
data_file = 'OUTPUT_FILES/out_data_sim_group_0.h5' # data file
par_file = 'input_files/input_params.yaml' # parameter file
grid_file = 'OUTPUT_FILES/out_data_grid.h5' # grid file
group = 'model'

# (Option 1) read original sensitivity kernel
# Ks: kernel w.r.t. slowness at the 7-th iteration
dataset = 'Ks_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# Kxi: kernel w.r.t. xi (anisotropic parameter) at the 7-th iteration
dataset = 'Kxi_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# Keta: kernel w.r.t. eta (anisotropic parameter) at the 7-th iteration
dataset = 'Keta_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# %%
# (Option 2) read kernel density
# Ks_den: kernel density w.r.t. slowness at the 7-th iteration
dataset = 'Ks_density_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# Kxi_den: kernel density w.r.t. xi (anisotropic parameter) at the 7-th iteration
dataset = 'Kxi_density_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# Keta_den: kernel density w.r.t. eta (anisotropic parameter) at the 7-th iteration
dataset = 'Keta_density_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# %%
# kernel density normalization is performed after smoothing, so the tag 'Ks_over_Kden_inv_%s'%(Nstep) is not available for versions > 1.0.3

# # (Option 3) read normalized kernel, K/(k_den)^\zeta
# dataset = 'Ks_over_Kden_inv_%s'%(Nstep)
# data = ATTData.read(data_file, par_file, grid_file, group, dataset)
# kernel_list[dataset] = data.to_xarray()

# # Kxi_norm: normalized kernel w.r.t. xi (anisotropic parameter) at the 7-th iteration
# dataset = 'Kxi_over_Kden_inv_%s'%(Nstep)
# data = ATTData.read(data_file, par_file, grid_file, group, dataset)
# kernel_list[dataset] = data.to_xarray()

# # Keta_norm: normalized kernel w.r.t. eta (anisotropic parameter) at the 7-th iteration
# dataset = 'Keta_over_Kden_inv_%s'%(Nstep)
# data = ATTData.read(data_file, par_file, grid_file, group, dataset)
# kernel_list[dataset] = data.to_xarray()
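
# %%
# (Illustration only, not an official PyTomoATT output.) For versions where the
# 'Ks_over_Kden' tag is no longer written, a normalized kernel can be approximated from
# the Option 1 and Option 2 datasets read above. The exponent zeta is an assumed
# placeholder here and should match the value actually used in input_params.yaml.
zeta = 1.0  # assumed normalization exponent
Ks   = kernel_list['Ks_inv_%s'%(Nstep)]['Ks_inv_%s'%(Nstep)]
Kden = kernel_list['Ks_density_inv_%s'%(Nstep)]['Ks_density_inv_%s'%(Nstep)]
Ks_norm = Ks / np.maximum(np.abs(Kden), 1e-12)**zeta  # guard against division by zero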

# %%
# this part works for version > 1.0.3

# # (Option 3) read kernel density smoothed by multigrid parameterization
# dataset = 'Ks_density_update_inv_%s'%(Nstep)
# data = ATTData.read(data_file, par_file, grid_file, group, dataset)
# kernel_list[dataset] = data.to_xarray()

# # Kxi_den_update: smoothed kernel density w.r.t. xi (anisotropic parameter) at the 7-th iteration
# dataset = 'Kxi_density_update_inv_%s'%(Nstep)
# data = ATTData.read(data_file, par_file, grid_file, group, dataset)
# kernel_list[dataset] = data.to_xarray()

# # Keta_den_update: smoothed kernel density w.r.t. eta (anisotropic parameter) at the 7-th iteration
# dataset = 'Keta_density_update_inv_%s'%(Nstep)
# data = ATTData.read(data_file, par_file, grid_file, group, dataset)
# kernel_list[dataset] = data.to_xarray()

# %%
# (Option 4) read normalized kernel smoothed by multigrid parameterization

# Ks_update: smoothed normalized kernel w.r.t. slowness at the 7-th iteration
dataset = 'Ks_update_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# Kxi_update: smoothed normalized kernel w.r.t. xi (anisotropic parameter) at the 7-th iteration
dataset = 'Kxi_update_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# Keta_update: smoothed normalized kernel w.r.t. eta (anisotropic parameter) at the 7-th iteration
dataset = 'Keta_update_inv_%s'%(Nstep)
data = ATTData.read(data_file, par_file, grid_file, group, dataset)
kernel_list[dataset] = data.to_xarray()

# %%
import os
os.makedirs('img', exist_ok=True)  # create the output directory if it does not exist yet

# %%
# ---------------- access 3D array ----------------
# we can access the 3D dataset as follows:

dep_1d_array = kernel_list['Ks_inv_0007']["dep"]
lat_1d_array = kernel_list['Ks_inv_0007']["lat"]
lon_1d_array = kernel_list['Ks_inv_0007']["lon"]

print("1D arrays of coordinates. \n dep: ", dep_1d_array.shape, " \n lat: ", lat_1d_array.shape, " \n lon: ", lon_1d_array.shape)

array = kernel_list['Ks_inv_0007']["Ks_inv_0007"]

print("3D array of kernel. \n Ks: ", array.shape)

# %%
# ---------------- 2D depth profile of kernels ----------------

for dataset in kernel_list:

    # interp kernel at depth = 20 km
    depth = 20.0
    kernel = kernel_list[dataset].interp_dep(depth, field=dataset) # kernel[i,:] are (lon, lat, kernel)

    print("kernel at depth = ", depth, " km. kernel:", kernel.shape, ", (lon, lat, kernel)")

    # plot
    fig = pygmt.Figure()
    fig.basemap(region=[0,2,0,2], frame=["xa1","ya1","+t%s"%(dataset)], projection="M10c") # base map
    pygmt.makecpt(cmap="jet", series=[-0.5, 0.5], background=True, reverse=True) # colorbar

    x = kernel[:,0] # longitude
    y = kernel[:,1] # latitude
    value = kernel[:,2]/np.nanmax(np.abs(kernel[:,2])) # normalized kernel value
    grid = pygmt.surface(x=x, y=y, z=value, spacing=0.04, region=[0,2,0,2])

    fig.grdimage(grid = grid) # plot figure
    fig.text(text="%d km"%(depth), x = 0.2 , y = 0.1, font = "14p,Helvetica-Bold,black", fill = "white")

    # colorbar
    fig.shift_origin(xshift=0, yshift=-1.5)
    fig.colorbar(frame = ["a0.5","y+l%s"%(dataset)], position="+e+w4c/0.3c+h")

    fig.savefig("img/3_dep_%s.png"%(dataset))

# %%
# ---------------- 2D vertical profile of kernels ----------------

for dataset in kernel_list:

    # interp from [0,0.6] in lon-lat to [2,0.6] in lon-lat, gap = 1 km
    start = [0,0.6]; end = [2,0.6]; gap = 1
    kernel_sec = kernel_list[dataset].interp_sec(start, end, field=dataset, val = gap) # kernel_sec[i,:] are (lon, lat, dis, dep, kernel)

    print("kernel_sec:", kernel_sec.shape, ", (lon, lat, distance, depth, kernel)")

    # plot
    fig = pygmt.Figure()
    fig.basemap(region=[0,2,0,40], frame=["xa1+lLongitude","ya20+lDepth (km)","+t%s"%(dataset)], projection="X10c/-2c") # base map
    pygmt.makecpt(cmap="jet", series=[-0.5, 0.5], background=True, reverse=True) # colorbar

    x = kernel_sec[:,0] # longitude
    y = kernel_sec[:,3] # depth
    value = kernel_sec[:,4]/np.nanmax(np.abs(kernel_sec[:,4])) # normalized kernel value
    grid = pygmt.surface(x=x, y=y, z=value, spacing="0.04/1", region=[0,2,0,40])

    fig.grdimage(grid = grid) # plot figure
    fig.text(text="A", x = 0.1 , y = 5, font = "14p,Helvetica-Bold,black", fill = "white")
    fig.text(text="A@+'@+", x = 1.9 , y = 5, font = "14p,Helvetica-Bold,black", fill = "white")

    # colorbar
    fig.shift_origin(xshift=0, yshift=-2)
    fig.colorbar(frame = ["a0.5","y+l%s"%(dataset)], position="+e+w4c/0.3c+h")

    fig.savefig("img/3_sec_%s.png"%(dataset))
54
examples/scripts_of_plotting/4_plot_earthquake_station.py
Normal file
@@ -0,0 +1,54 @@
# %%
import pygmt
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")

# %%
from pytomoatt.src_rec import SrcRec

# read src_rec_file
sr = SrcRec.read("input_files/src_rec_file.dat")

# get the coordinates of the stations and earthquakes
stations = sr.receivers[['stlo','stla','stel']].values.T
earthquakes = sr.sources[['evlo','evla','evdp']].values.T

print(stations.shape)
print(earthquakes.shape)

# %%
# plot earthquakes and stations

fig = pygmt.Figure()

pygmt.makecpt(cmap="jet", series=[0, 40], background=True, reverse=True) # colorbar

# -------- horizontal view (x-y) --------
fig.basemap(region=[0,2,0,2], frame=["xa1","ya1","NsWe"], projection="M10c") # base map
# earthquakes
fig.plot(x = earthquakes[0,:], y = earthquakes[1,:], cmap = True, style = "c0.1c", fill = earthquakes[2,:])
# stations
fig.plot(x = stations[0,:], y = stations[1,:], style = "t0.4c", fill = "blue", pen = "black", label = "Station")

# -------- vertical view (x-z) --------
fig.shift_origin(xshift=0, yshift=-3)
fig.basemap(region=[0,2,0,40], frame=["xa1","ya20+lDepth (km)","NsWe"], projection="X10c/-2c") # base map
# earthquakes
fig.plot(x = earthquakes[0,:], y = earthquakes[2,:], cmap = True, style = "c0.1c", fill = earthquakes[2,:])

# -------- vertical view (z-y) --------
fig.shift_origin(xshift=11, yshift=3)
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","NsWe"], projection="X2c/10c") # base map
# earthquakes
fig.plot(x = earthquakes[2,:], y = earthquakes[1,:], cmap = True, style = "c0.1c", fill = earthquakes[2,:])

# colorbar
fig.shift_origin(xshift=0, yshift=-1.5)
fig.colorbar(frame = ["a20","x+lDepth (km)"], position="+e+w2c/0.3c+h")

fig.savefig("img/4_earthquakes_and_stations.png")
124
examples/scripts_of_plotting/5_plot_objective_function.py
Normal file
@@ -0,0 +1,124 @@
# %%
import sys
sys.path.append('../utils')
import functions_for_data as ffd

# %%
# read objective function

path = "OUTPUT_FILES"
full_curve, location_curve, model_curve = ffd.read_objective_function_file(path)

print("full_curve: ", full_curve.shape, ", the total objective function value during the inversion, including relocation and model update")
print("location_curve: ", location_curve.shape, ", the objective function value during the relocation step")
print("model_curve: ", model_curve.shape, ", the objective function value during the model update step")

print("The first index is the iteration number, the second index is the objective function value vector")

# %%
# (Option 1) objective function value
full_obj = full_curve[:,0]
location_obj = location_curve[:,0]
model_obj = model_curve[:,0]

# (Option 2) objective function value for only traveltime
full_obj_tt = full_curve[:,1]
location_obj_tt = location_curve[:,1]
model_obj_tt = model_curve[:,1]

# (Option 3) objective function value for only common source differential arrival time
full_obj_cs = full_curve[:,2]
location_obj_cs = location_curve[:,2]
model_obj_cs = model_curve[:,2]

# (Option 4) objective function value for only common receiver differential arrival time
full_obj_cr = full_curve[:,3]
location_obj_cr = location_curve[:,3]
model_obj_cr = model_curve[:,3]

# (Option 5) objective function value for teleseismic differential arrival time
full_obj_tele = full_curve[:,4]
location_obj_tele = location_curve[:,4]
model_obj_tele = model_curve[:,4]

# (Option 6) mean value of all data residuals
full_mean = full_curve[:,5]
location_mean = location_curve[:,5]
model_mean = model_curve[:,5]

# (Option 7) standard deviation of all data residuals
full_std = full_curve[:,6]
location_std = location_curve[:,6]
model_std = model_curve[:,6]

# (Option 8) mean value of residuals of traveltime
full_mean_tt = full_curve[:,7]
location_mean_tt = location_curve[:,7]
model_mean_tt = model_curve[:,7]

# (Option 9) standard deviation of residuals of traveltime
full_std_tt = full_curve[:,8]
location_std_tt = location_curve[:,8]
model_std_tt = model_curve[:,8]

# (Option 10) mean value of residuals of common source differential arrival time
full_mean_cs = full_curve[:,9]
location_mean_cs = location_curve[:,9]
model_mean_cs = model_curve[:,9]

# (Option 11) standard deviation of residuals of common source differential arrival time
full_std_cs = full_curve[:,10]
location_std_cs = location_curve[:,10]
model_std_cs = model_curve[:,10]

# (Option 12) mean value of residuals of common receiver differential arrival time
full_mean_cr = full_curve[:,11]
location_mean_cr = location_curve[:,11]
model_mean_cr = model_curve[:,11]

# (Option 13) standard deviation of residuals of common receiver differential arrival time
full_std_cr = full_curve[:,12]
location_std_cr = location_curve[:,12]
model_std_cr = model_curve[:,12]

# (Option 14) mean value of residuals of teleseismic differential arrival time
full_mean_tele = full_curve[:,13]
location_mean_tele = location_curve[:,13]
model_mean_tele = model_curve[:,13]

# (Option 15) standard deviation of residuals of teleseismic differential arrival time
full_std_tele = full_curve[:,14]
location_std_tele = location_curve[:,14]
model_std_tele = model_curve[:,14]
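
# %%
# A quick extra illustration (not part of the original script): the per-iteration
# statistics extracted above can be plotted directly, e.g. the mean +/- one standard
# deviation of the traveltime residuals during the model update steps. The output file
# name below is arbitrary.
import os
import numpy as np
import matplotlib.pyplot as plt

os.makedirs('img', exist_ok=True)
it = np.arange(len(model_mean_tt))
plt.figure(figsize=(10, 4))
plt.errorbar(it, model_mean_tt, yerr=model_std_tt, fmt='o-', capsize=3, label='traveltime residuals')
plt.axhline(0.0, color='gray', linewidth=1)  # reference line at zero residual
plt.xlabel('Iteration number')
plt.ylabel('Residual (s)')
plt.legend()
plt.savefig('img/5_residual_mean_std.png', dpi=300, bbox_inches='tight')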

# %%
import os
os.makedirs("img", exist_ok=True)  # create the output directory if it does not exist yet

# %%
# plot objective function reduction

import matplotlib.pyplot as plt
import numpy as np

plt.figure(figsize=(10, 6))
ax = plt.subplot(1, 1, 1)

ax.plot(model_obj/np.max(model_obj), label='objective function', linewidth=2)
ax.set_xlim([-0.2, len(model_obj)-0.8])
ax.set_ylim([0, 1.1])
ax.grid()
ax.set_xlabel('Iteration number',fontsize=14)
ax.set_ylabel('Normalized value',fontsize=14)
ax.tick_params(axis='x', labelsize=14)
ax.tick_params(axis='y', labelsize=14)
ax.legend(fontsize=14)

plt.savefig('img/5_objective_function_reduction.png', dpi=300, bbox_inches='tight', edgecolor='w', facecolor='w')
86
examples/scripts_of_plotting/6_plot_data_residual.py
Normal file
@@ -0,0 +1,86 @@
# %%
import sys
sys.path.append('../utils')
import functions_for_data as ffd

# %%
# synthetic and observational traveltime files in the initial and final models

file_init_syn = "OUTPUT_FILES/src_rec_file_inv_0000_reloc_0000.dat" # synthetic traveltime in the initial model
file_init_obs = "input_files/src_rec_file.dat" # observational traveltime in the initial model

file_final_syn = "OUTPUT_FILES/src_rec_file_inv_0009_reloc_0009.dat" # synthetic traveltime in the final model
file_final_obs = "OUTPUT_FILES/src_rec_file_inv_0009_reloc_0009_obs.dat" # observational traveltime in the final model

# %%
# from pytomoatt.src_rec import SrcRec
# init_syn = SrcRec.read(file_init_syn)
# init_obs = SrcRec.read(file_init_obs)

# final_syn = SrcRec.read(file_final_syn)
# final_obs = SrcRec.read(file_final_obs)

# %%
ev, st = ffd.read_src_rec_file(file_init_syn)
time_init_syn = ffd.data_dis_time(ev, st)[1] # synthetic traveltime in the initial model

ev, st = ffd.read_src_rec_file(file_init_obs)
time_init_obs = ffd.data_dis_time(ev, st)[1] # observational traveltime in the initial model

ev, st = ffd.read_src_rec_file(file_final_syn)
time_final_syn = ffd.data_dis_time(ev, st)[1] # synthetic traveltime in the final model

ev, st = ffd.read_src_rec_file(file_final_obs)
time_final_obs = ffd.data_dis_time(ev, st)[1] # observational traveltime in the final model

# %%
import os
os.makedirs("img", exist_ok=True)  # create the output directory if it does not exist yet

# %%
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)

range_l = -1.5
range_r = 1.5
Nbar = 20

bins = np.linspace(range_l, range_r, Nbar)
error_init = time_init_syn - time_init_obs
error_final = time_final_syn - time_final_obs

tag1 = "initial model"
tag2 = "final model"

hist_init, _, _ = ax.hist(error_init, bins=bins, histtype='step', edgecolor = "red", linewidth = 2,
                          label = "%s: std = %5.3f s, mean = %5.3f s"%(tag1,np.std(error_init),np.mean(error_init)))

hist_final, _, _ = ax.hist(error_final, bins=bins, alpha = 0.5, color = "blue",
                           label = "%s: std = %5.3f s, mean = %5.3f s"%(tag2,np.std(error_final),np.mean(error_final)))

print("residual for the ", tag1, " is: ", "mean: ", np.mean(error_init), "std: ", np.std(error_init))
print("residual for the ", tag2, " is: ", "mean: ", np.mean(error_final), "std: ", np.std(error_final))
ax.legend(fontsize=14)

ax.set_xlim(range_l - abs(range_l)*0.1, range_r + abs(range_r)*0.1)
ax.set_ylim(0, 1.3*max(max(hist_init), max(hist_final)))

ax.tick_params(axis='x', labelsize=18)
ax.tick_params(axis='y', labelsize=18)
ax.set_ylabel('Count of data', fontsize=18)
ax.set_xlabel('Traveltime residuals (s)', fontsize=18)
ax.set_title("$t_{syn} - t_{obs}$", fontsize=18)
ax.grid()

plt.savefig("img/6_data_residual.png", dpi=300, bbox_inches='tight', edgecolor='w', facecolor='w')
plt.show()
61
examples/scripts_of_plotting/7_plot_inversion_grid.py
Normal file
@@ -0,0 +1,61 @@
# %%
import pygmt
pygmt.config(FONT="16p", IO_SEGMENT_MARKER="<<<")

# %%
import sys
sys.path.append('../utils')
import functions_for_data as ffd

# %%
# read inversion grid file

inv_grid_vel, inv_grid_ani = ffd.read_inversion_grid_file("OUTPUT_FILES")

print("inversion grid for velocity: ", inv_grid_vel.shape)
print("inversion grid for anisotropy: ", inv_grid_ani.shape)

Nset = inv_grid_vel.shape[0]
Ngrid = inv_grid_vel.shape[1]

colorlist = ["green","blue","red","purple","orange","yellow","black","gray","pink","cyan"]

# %%
# plot the inversion grid nodes

fig = pygmt.Figure()

pygmt.makecpt(cmap="jet", series=[0, 40], background=True, reverse=True) # colorbar

# -------- horizontal view (x-y) --------
fig.basemap(region=[0,2,0,2], frame=["xa1","ya1","NsWe"], projection="M10c") # base map
# plot inversion grid
for igrid in range(Nset):
    x = inv_grid_vel[igrid,:,0]
    y = inv_grid_vel[igrid,:,1]
    fig.plot(x=x, y=y, style="c0.1c", fill=colorlist[igrid])

# -------- vertical view (x-z) --------
fig.shift_origin(xshift=0, yshift=-3)
fig.basemap(region=[0,2,0,40], frame=["xa1","ya20+lDepth (km)","NsWe"], projection="X10c/-2c") # base map
# plot inversion grid
for igrid in range(Nset):
    x = inv_grid_vel[igrid,:,0]
    y = inv_grid_vel[igrid,:,2]
    fig.plot(x=x, y=y, style="c0.1c", fill=colorlist[igrid])

# -------- vertical view (z-y) --------
fig.shift_origin(xshift=11, yshift=3)
fig.basemap(region=[0,40,0,2], frame=["xa20+lDepth (km)","ya1","NsWe"], projection="X2c/10c") # base map
# plot inversion grid
for igrid in range(Nset):
    x = inv_grid_vel[igrid,:,2]
    y = inv_grid_vel[igrid,:,1]
    fig.plot(x=x, y=y, style="c0.1c", fill=colorlist[igrid])

fig.savefig("img/7_inversion_grid.png")
16
examples/scripts_of_plotting/README.md
Normal file
@@ -0,0 +1,16 @@
# Scripts of plotting

This directory contains examples that illustrate how to read and plot the output files of TomoATT.

The following Python packages are required to initiate the inversion and to plot the final results:
- h5py
- PyTomoATT
- PyGMT
- GMT (the underlying GMT installation used by PyGMT)
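
If these are not installed yet, one possible setup (the package names on conda-forge/PyPI are assumed; adjust for your environment) is `conda install -c conda-forge pygmt gmt h5py` followed by `pip install pytomoatt`.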

Run this example:

1. Run the bash script `bash run_this_example.sh` to execute the test.
21
examples/scripts_of_plotting/prepare_files.py
Normal file
@@ -0,0 +1,21 @@
# %%
# download the files for plotting from Zenodo
import os
import requests

url = 'https://zenodo.org/records/14160818/files/files_for_plotting.tar.gz?download=1'

path = "files_for_plotting.tar.gz"

# check file existence
if not os.path.exists(path):
    print("Downloading files_for_plotting.tar.gz from Zenodo...")
    print("The file is about 400 MB, so it may take a while.")
    response = requests.get(url, stream=True)
    with open(path, 'wb') as out_file:
        # write the response in chunks to avoid holding the whole archive in memory
        for chunk in response.iter_content(chunk_size=1024*1024):
            out_file.write(chunk)
    print("Download complete.")
else:
    print("files_for_plotting.tar.gz already exists.")
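
# %%
# Optional (not part of the original workflow): the archive can also be extracted
# directly from Python instead of via the `tar -xf` call in run_this_example.sh.
# This sketch uses the standard-library tarfile module and assumes the archive unpacks
# into the current directory (e.g. an input_files/ folder); adjust the check if the layout differs.
import tarfile

if os.path.exists(path) and not os.path.exists("input_files"):
    with tarfile.open(path, "r:gz") as tar:
        tar.extractall(".")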
50
examples/scripts_of_plotting/run_this_example.sh
Normal file
@@ -0,0 +1,50 @@
#!/bin/bash

# Scripts to plot figures for the output files of TomoATT

python prepare_files.py            # download the files for plotting
tar -xf files_for_plotting.tar.gz  # extract the files

# Test 1: plot velocity perturbation and azimuthal anisotropy fields to generate
#   img/1_dep_vel.png   2D velocity perturbation at 20 km depth
#   img/1_dep_ani.png   2D azimuthal anisotropy at 20 km depth
#   img/1_sec_vel.png   2D velocity perturbation along a vertical section
python 1_plot_model.py

# Test 2: plot traveltime and adjoint fields to generate
#   img/2_dep_time.png      2D traveltime field at 20 km depth
#   img/2_sec_time.png      2D traveltime field along a vertical section
#   img/2_dep_adjoint.png   2D adjoint field at XX depth
#   img/2_sec_adjoint.png   2D adjoint field along a vertical section
python 2_plot_time_field.py

# Test 3: plot kernels to generate
#   img/3_dep_Ks_inv_0007.png            Ks: original kernel w.r.t. slowness at 20 km depth
#   img/3_dep_Ks_density_inv_0007.png    Kden: kernel density w.r.t. slowness at 20 km depth
#   img/3_dep_Ks_over_Kden_inv_0007.png  Ks/Kden^{\zeta}: normalized kernel w.r.t. slowness at 20 km depth
#   img/3_dep_Ks_update_inv_0007.png     smoothed normalized kernel w.r.t. slowness at 20 km depth
#   img/3_sec_Ks_inv_0007.png            Ks: original kernel w.r.t. slowness along a vertical section
#   img/3_sec_Ks_density_inv_0007.png    Kden: kernel density w.r.t. slowness along a vertical section
#   img/3_sec_Ks_over_Kden_inv_0007.png  Ks/Kden^{\zeta}: normalized kernel w.r.t. slowness along a vertical section
#   img/3_sec_Ks_update_inv_0007.png     smoothed normalized kernel w.r.t. slowness along a vertical section
# and the same for kernels w.r.t. xi and eta (azimuthal anisotropy)
python 3_plot_kernel.py

# Test 4: plot earthquakes and stations to generate
#   img/4_earthquakes_and_stations.png   the locations of earthquakes and stations
python 4_plot_earthquake_station.py

# Test 5: plot the objective function reduction to generate
#   img/5_objective_function_reduction.png   the reduction of the objective function value
python 5_plot_objective_function.py

# Test 6: plot traveltime residuals to generate
#   img/6_data_residual.png   the traveltime residuals
python 6_plot_data_residual.py

# Test 7: plot the inversion grid to generate
#   img/7_inversion_grid.png   the inversion grid
python 7_plot_inversion_grid.py
BIN
examples/src_rec_file_instruction.jpg
Normal file
Binary file not shown. (Size: 1.9 MiB)
2719
examples/utils/functions_for_data.ipynb
Normal file
File diff suppressed because it is too large
2414
examples/utils/functions_for_data.py
Normal file
File diff suppressed because it is too large
15
examples/utils/svel13_chen.cpt
Normal file
@@ -0,0 +1,15 @@
-0.7000E+01 166 0 0 -0.6000E+01 225 0 0
-0.6000E+01 225 0 0 -0.5000E+01 255 75 0
-0.5000E+01 255 75 0 -0.4000E+01 255 132 0
-0.4000E+01 255 132 0 -0.3000E+01 255 198 0
-0.3000E+01 255 198 0 -0.2000E+01 255 255 0
-0.2000E+01 255 255 0 -0.1000E+00 255 255 255
-0.1000E+00 255 255 255 0.1000E+00 255 255 255
0.1000E+00 255 255 255 0.2000E+01 0 255 255
0.2000E+01 0 255 255 0.3000E+01 90 205 255
0.3000E+01 90 205 255 0.4000E+01 26 160 255
0.4000E+01 26 160 255 0.5000E+01 0 100 255
0.5000E+01 0 100 255 0.6000E+01 0 50 200
0.6000E+01 0 50 200 0.7000E+01 0 10 160
B 166 0 0
F 0 10 160
BIN
external_libs/.DS_Store
vendored
Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff.