Unverified commit 5f5f89cb authored by Kelvin Rodriguez, committed by GitHub

Mkdocs update (#26)

* reworked docs into mkdocs

* added deploy stuff

* added new build to tests

* update env

* fixed env

* another env update

* forgot material

* addressed comments and changed themes
parent f6495c0c
-name: CMake
+name: Build-And-Test
 on: [push, pull_request]

@@ -99,42 +99,16 @@ jobs:
       conda info
       conda list
-    - name: Create Build Environment
-      # Some projects don't allow in-source building, so create separate build and install
-      # directorie; we'll use them as our working directories for subsequent commands.
-      run: |
-        cmake -E make_directory ${{github.workspace}}/build
-        cmake -E make_directory ${{github.workspace}}/install
-    - name: Configure CMake
-      working-directory: ${{github.workspace}}/build
-      run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSPICEQL_BUILD_LIB=OFF -DCMAKE_INSTALL_PREFIX=${{github.workspace}}/install
     - name: Build
-      working-directory: ${{github.workspace}}/build
+      working-directory: ${{github.workspace}}/
       # Execute the build.
       run: |
-        cmake --build . --config $BUILD_TYPE
+        mkdocs build
     - name: Check Build Docs
-      working-directory: ${{github.workspace}}/build/docs/sphinx
+      working-directory: ${{github.workspace}}/site
       # Check for the built docs
       run: |
         test -e index.html
-        test -e reference/api.html
-        test -e reference/tutorials.html
-    - name: Install Docs
-      working-directory: ${{github.workspace}}/build
-      # Install the build.
-      run: |
-        cmake --install . --config $BUILD_TYPE
-    - name: Check Install Docs
-      working-directory: ${{github.workspace}}/install
-      # Check for the built docs
-      run: |
-        test -e share/doc/SpiceQL/sphinx/index.html
-        test -e share/doc/SpiceQL/sphinx/reference/api.html
-        test -e share/doc/SpiceQL/sphinx/reference/tutorials.html
@@ -70,33 +70,21 @@ jobs:
       cmake -E make_directory ${{github.workspace}}/build
       cmake -E make_directory ${{github.workspace}}/install
-    - name: Configure CMake
-      working-directory: ${{github.workspace}}/build
-      run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DSPICEQL_BUILD_LIB=OFF -DCMAKE_INSTALL_PREFIX=${{github.workspace}}/install
     - name: Build
-      working-directory: ${{github.workspace}}/build
+      working-directory: ${{github.workspace}}
       # Execute the build.
       run: |
-        cmake --build .
+        mkdocs build
     - name: Check Build Docs
-      working-directory: ${{github.workspace}}/build/docs/sphinx
+      working-directory: ${{github.workspace}}/site/
       # Check for the built docs
       run: |
         test -e index.html
-        test -e reference/api.html
-        test -e reference/tutorials.html
-    - name: Install Docs
-      working-directory: ${{github.workspace}}/build
-      # Install the build.
-      run: |
-        cmake --install .
     - name: Upload to S3
-      working-directory: ${{github.workspace}}/install
+      working-directory: ${{github.workspace}}/
       run: |
         ls -l share/doc/SpiceQL/sphinx/
-        aws s3 sync --delete share/doc/SpiceQL/sphinx/ s3://asc-public-docs/software_manuals/spiceql/
+        aws s3 sync --delete site/ s3://asc-public-docs/software_manuals/spiceql/
@@ -204,14 +204,3 @@ else()
   message(STATUS "Skipping Bindings")
 endif()
-##############
-# Docs Build #
-##############
-option (SPICEQL_BUILD_DOCS "Build the SpiceQL Docs" ON)
-if(SPICEQL_BUILD_DOCS)
-  add_subdirectory ("docs")
-else()
-  message(STATUS "Skipping Docs")
-endif()
 %module pyspiceql
+%feature("autodoc", "3");
 %include "std_vector.i"
 %include "std_string.i"
 %include "std_array.i"
message(STATUS "Setting Up Docs")
find_package(Doxygen REQUIRED)
find_package(Sphinx REQUIRED)
# Find all the public headers
file(GLOB_RECURSE SPICEQL_PUBLIC_HEADERS ${SPICEQL_BUILD_INCLUDE_DIR}/*.h)
set(DOXYGEN_INPUT_DIR ${PROJECT_SOURCE_DIR}/SpiceQL/include/)
set(DOXYGEN_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(DOXYGEN_INDEX_FILE ${DOXYGEN_OUTPUT_DIR}/xml/index.xml)
set(DOXYFILE_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in)
set(DOXYFILE_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file(${DOXYFILE_IN} ${DOXYFILE_OUT} @ONLY)
message(STATUS "DOXYGEN_INPUT_DIR: " ${DOXYGEN_INPUT_DIR})
message(STATUS "DOXYGEN_OUTPUT_DIR: " ${DOXYGEN_OUTPUT_DIR})
message(STATUS "DOXYGEN_INDEX_FILE: " ${DOXYGEN_INDEX_FILE})
message(STATUS "DOXYFILE_IN: " ${DOXYFILE_IN})
message(STATUS "DOXYFILE_OUT: " ${DOXYFILE_OUT})
file(MAKE_DIRECTORY ${DOXYGEN_OUTPUT_DIR})
add_custom_command(OUTPUT ${DOXYGEN_INDEX_FILE}
DEPENDS ${SPICEQL_PUBLIC_HEADERS}
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYFILE_OUT}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
MAIN_DEPENDENCY Doxyfile
COMMENT "Generating docs"
VERBATIM)
add_custom_target(Doxygen ALL DEPENDS ${DOXYGEN_INDEX_FILE})
set(SPHINX_SOURCE ${CMAKE_CURRENT_SOURCE_DIR})
set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}/sphinx)
set(SPHINX_INDEX_FILE ${SPHINX_BUILD}/index.html)
set(SPHINX_RST_FILES ${CMAKE_CURRENT_SOURCE_DIR}/index.rst
${CMAKE_CURRENT_SOURCE_DIR}/reference/api.rst
${CMAKE_CURRENT_SOURCE_DIR}/reference/tutorials.rst)
add_custom_command(OUTPUT ${SPHINX_INDEX_FILE}
COMMAND
${SPHINX_EXECUTABLE} -b html
# Tell Breathe where to find the Doxygen output
-Dbreathe_projects.SpiceQL=${DOXYGEN_OUTPUT_DIR}/xml
${SPHINX_SOURCE} ${SPHINX_BUILD}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
DEPENDS ${SPICEQL_PUBLIC_HEADERS}
# Other docs files you want to track should go here (or in some variable)
${SPHINX_RST_FILES}
${CMAKE_CURRENT_SOURCE_DIR}/../README.md # Docs insert the readme, so it's a dep
${DOXYGEN_INDEX_FILE}
# MAIN_DEPENDENCY ${SPHINX_SOURCE}/conf.py
COMMENT "Generating documentation with Sphinx")
# Nice named target so we can run the job easily
add_custom_target(Sphinx ALL DEPENDS ${SPHINX_INDEX_FILE})
# Add an install target to install the docs
include(GNUInstallDirs)
install(DIRECTORY ${SPHINX_BUILD}
DESTINATION ${CMAKE_INSTALL_DOCDIR})
<swagger-ui src="http://asc-public-docs.s3.us-west-2.amazonaws.com/restful-apis/spiceql-prod.yml"/>
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import subprocess, os
breathe_projects = {}
# Check if we're running on Read the Docs' servers
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
build_started = os.path.isfile("CMakeCache.txt")
# go into main build directory
# os.chdir('../build')
# Print some debug stuff
print("READ THE DOCS SERVER FLAG: ", read_the_docs_build)
print("BUILD FLAG: ", build_started)
print("WORKING DIR: ", os.getcwd())
output_dir = 'docs'
# subprocess.call('doxygen', shell=True)
breathe_projects['SpiceQL'] = output_dir + '/xml'
# -- Project information -----------------------------------------------------
project = 'SpiceQL'
copyright = '2021, USGS'
author = 'USGS Astrogeology'
# The full version, including alpha/beta/rc tags
release = '1.0.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ "breathe", "m2r2", "sphinx_material", "sphinxcontrib.gist"]
# Breathe Configuration
breathe_default_project = "SpiceQL"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_material'
html_favicon = "favicon.ico"
# Material theme options (see theme.conf for more information)
html_theme_options = {
# Set the name of the project to appear in the navigation.
'nav_title': 'Sugar Spice',
# Set your GA account ID to enable tracking
# 'google_analytics_account': 'UA-XXXXX',
# Specify a base_url used to generate sitemap.xml. If not
# specified, then no sitemap will be built.
# 'base_url': 'https://github.com/DOI-USGS/SpiceQL',
# Set the color and the accent color
'color_primary': '#000000',
'color_accent': 'light-blue',
'logo_icon': '&#xe87a',
# Set the repo location to get a badge with stats
'repo_url': 'https://github.com/DOI-USGS/SpiceQL',
'repo_name': 'Project',
# Visible levels of the global TOC; -1 means unlimited
'globaltoc_depth': 3,
# If False, expand all TOC entries
'globaltoc_collapse': False,
# If True, show hidden TOC entries
'globaltoc_includehidden': False,
}
html_sidebars = {
"**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
if read_the_docs_build and not build_started:
subprocess.call('cmake ..', shell=True)
subprocess.call('make Sphinx', shell=True)
os.environ["SPICEQLDOCSBUILD"] = "True"
docs/favicon.ico (15 KiB)
# SpiceQL
This library provides a C++ interface for querying, reading, and writing NAIF SPICE kernels. Built on the [NAIF Toolkit](https://naif.jpl.nasa.gov/naif/toolkit.html).
## Building The Library
The library leverages Anaconda to maintain all of its dependencies, so in order to build SpiceQL you'll need to have Anaconda installed.
> **NOTE**: If you already have Anaconda installed, skip to step 3.
1. Download either the Anaconda or Miniconda installation script for your OS platform. Anaconda is a much larger distribution of packages supporting scientific Python, while Miniconda is a minimal installation: Anaconda installer, Miniconda installer
1. If you are running some variant of Linux, open a terminal window in the directory where you downloaded the script and run the downloaded installer (for example, `bash Anaconda3-<version>-Linux-x86_64.sh`). In this example, we chose a full install of Anaconda on a Linux-based OS; your file name may differ depending on your environment.
   * If you are running macOS, a pkg file (with a name similar to Anaconda3-5.3.0-MacOSX-x86_64.pkg) will be downloaded. Double-click the file to start the installation process.
1. Open a command-line prompt and run the following commands:
```bash
# Clone the Github repo, note the recursive flag, this library depends on
# submodules that also need to be cloned. --recurse-submodules enables this and
# the -j8 flag parallelizes the cloning process.
git clone --recurse-submodules -j8 https://github.com/DOI-USGS/SpiceQL.git
# cd into repo dir
cd SpiceQL
# Create a new environment from the provided dependency file; the -n flag is
# followed by the name of the new environment, change this to whatever works for you
conda env create -f environment.yml -n ssdev
# activate the new env
conda activate ssdev
# make and cd into the build directory. This can be placed anywhere, but here we make
# it in the repo (build/ is in .gitignore, so no issues there)
mkdir build
cd build
# Configure the project, install directory can be anything, here, it's the conda env
cmake .. -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX
# Optional: DB files are installed by default in $CONDA_PREFIX/etc/SpiceQL/db; to
# use the files included within the repo instead, you must create and define
# an environment variable named SSPICE_DEBUG.
# note: SSPICE_DEBUG can be set to anything as long as it is defined
export SSPICE_DEBUG=True
# Set the environment variable(s) to point to your kernel install.
# The following environment variables are used by default, in order of priority:
# $SPICEROOT, $ALESPICEROOT, $ISISDATA.
# SPICEROOT is unique to this lib, while ALESPICEROOT and ISISDATA are used
# by ALE and ISIS, respectively.
# note: you can set each of these environment variables to point to the
# corresponding kernels' downloaded location, e.g.
SPICEROOT=~/spiceQL/Kernels/spiceRootKernel
ALESPICEROOT=~/spiceQL/Kernels/aleSpiceRootKernel
ISISDATA=~/spiceQL/Kernels/isisData
# build and install project
make install
# Optional: run tests
ctest -j8
```
You can disable different components of the build by setting the CMake variables `SPICEQL_BUILD_DOCS`, `SPICEQL_BUILD_TESTS`, `SPICEQL_BUILD_BINDINGS`, or `SPICEQL_BUILD_LIB` to `OFF`. For example, the following CMake configuration command will build neither the documentation nor the tests:
```
cmake .. -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX -DSPICEQL_BUILD_DOCS=OFF -DSPICEQL_BUILD_TESTS=OFF
```
## Bindings
The SpiceQL API is available via Python bindings in the module `pyspiceql`. The bindings are built using SWIG and are enabled by default. You can disable the bindings in your build by setting `SPICEQL_BUILD_BINDINGS` to `OFF` when configuring your build.
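A minimal usage sketch (hedged: the function name below is assumed to mirror the `utcToEt` REST endpoint documented later on this page; check the generated module with `help(pyspiceql)` for the exact names and signatures):

```python
# Hedged sketch: assumes pyspiceql exposes utcToEt, mirroring the
# REST endpoint of the same name documented below; verify against
# the generated module before relying on this.
import pyspiceql

# Convert a UTC time string to an ephemeris time.
et = pyspiceql.utcToEt("2016-11-26 22:32:14.582000")
print(et)
```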
## Memoization Header Library
SpiceQL has a simple memoization header-only library at `SpiceQL/include/memo.h`. It can cache function results on disk using a binary archive format, keyed by a combined hash of a function ID and the function's input parameters.
TL;DR:
```C++
#include <cassert>
#include "memo.h"

// Example function to cache; any deterministic function works.
int func(int x) { return x * 2; }

int main() {
  memoization::disk c("cache_path");

  // use case 1: wrap a single function call
  // (function ID, the function to wrap, and then its params)
  int result1 = c("func_id", func, 3);

  // use case 2: wrap the function itself
  // (cache object, function ID, function)
  auto func_memoed = memoization::make_memoized(c, "func_id", func);
  int result2 = func_memoed(3);

  assert(result1 == result2);
}
```
.. SpiceQL documentation master file, created by
sphinx-quickstart on Fri Mar 12 18:57:11 2021.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
.. mdinclude:: ../README.md
Docs
====
.. toctree::
:maxdepth: 2
:caption: Contents:
reference/api
reference/tutorials
###
API
###
.. doxygennamespace:: SpiceQL
:project: SpiceQL
Tutorials
#########
.. contents:: Table of Contents
:depth: 3
Required Reading
================
There are many niche topics that are important to understanding how SPICE and kernels work. The important things to know:
1. What are kernels?
2. What are the different kernel types?
3. What are the different kernel qualities?
Here are some resources:
* Intro to Kernels, includes descriptions of kernel types: https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/Tutorials/pdf/individual_docs/12_intro_to_kernels
* Useful glossary of terms: https://isis.astrogeology.usgs.gov/documents/Glossary/Glossary.html
* Kerneldbgen, used to create ISIS DB files, docs has useful descriptions of Kernel qualities: https://isis.astrogeology.usgs.gov/Application/presentation/Tabbed/kerneldbgen/kerneldbgen.html
* NAIF required reading on SPICE: https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/index.html
* ISIS repo, for more general information and wikis: https://github.com/DOI-USGS/ISIS3/pulls?q=is%3Apr+is%3Aclosed
Creating New Mission Config Files
=================================
.. _kerneldbgen: https://isis.astrogeology.usgs.gov/Application/presentation/Tabbed/kerneldbgen/kerneldbgen.html
.. _ECMAScript: http://www.cplusplus.com/reference/regex/ECMAScript/
NAIF does not provide adequate resources for programmatically getting all required metadata. ISIS solves this problem by creating "DB files": PVL files containing metadata for every kernel in the ISIS data area. These DB files are predominantly human-curated, in that either the DB files are handwritten or a script (generally called ``makedb``, co-located with the kernels in the kernel directory) is handwritten to use the kerneldbgen_ app to cache most of the kernel metadata.
Issues with ISIS's approach:
* DB files are co-located with the kernels, which makes it difficult to release them separately from the kernels.
* The DB files are tied to the ISIS data area and cannot be used directly with NAIF-distributed kernels.
* As the DB files live in the ISIS data area, they cannot be version-controlled separately.
* If you add kernels, you have to regenerate the DB files.
* ISIS's code for searching and exploiting kernels is not modularized.
* Kernel search works on a per-image basis and cannot be used for arbitrary SPICE queries.
SpiceQL more-or-less rewrites much of ISIS's Kernel searching and exploitation code.
Distinctions in SpiceQL's approach:
* DB files, here referred to as mission config files, are JSON files that can be shipped separately from data installs.
* Config files leverage common kernel naming schemes, so you can use NAIF-installed kernels as well as kernels from USGS Astro or any other source.
* Config files are source-controlled.
* If you add kernels, you can easily modify the config files locally and contribute your changes for the new kernels.
* Sugar Spice modularizes most of the kernel code for search and I/O.
* Config files can be used for more generalized kernel queries.
Understanding how ISIS stores Kernel metadata
*********************************************
Structure of ISIS data area, ignoring non-kernel data:
.. code-block:: shell
.
└── $ISISDATA/
└── <mission>/
└── kernels/
├── <Binary Kernel Type>/
│ ├── <kernel db files>
│ ├── <Kernel makedb files>
│ └── <kernel files>
└── <Text Kernel Type>/
├── <kernel db files>
└── <kernel files>
Here, ``<mission>`` is the name of a particular mission (mro, odyssey, messenger, etc.), and ``<Kernel Type>`` has a slightly different structure depending on whether the kernel type is binary (CK, SPK, etc.) or text (IK, IAK, LSK, etc.). This section focuses on the kernel "makedb" and "db" files.
Each kernel directory (ck, spk, ik, etc.) has a file called ``kernels.[0-9]{4}.db``, where ``[0-9]{4}`` is the version of the DB file. As ISIS kernel metadata is not version-controlled, it uses a proprietary versioning system in which the four digits after the first extension delimiter are a version number.
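As a quick illustration (not SpiceQL code), here is how the newest DB file can be selected under that versioning scheme; the file names are invented for the example:

.. code-block:: python

    import re

    # Pick the newest DB file using the kernels.NNNN.db versioning
    # scheme described above (file names are illustrative).
    db_files = ["kernels.0001.db", "kernels.0042.db", "kernels.0184.db"]

    def version(name):
        return int(re.search(r"kernels\.(\d{4})\.db", name).group(1))

    print(max(db_files, key=version))  # kernels.0184.db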
**SPK Kernel DB file, $ISISDATA/messenger/kernels/spk/kernels.0184.db**
.. code-block:: text
Object = SpacecraftPosition
RunTime = 2015-09-14T13:41:46
Group = Dependencies
LeapsecondKernel = $base/kernels/lsk/naif0011.tls
End_Group
Group = Selection
Time = ("2004 AUG 03 07:14:39.393205 TDB",
"2015 APR 30 19:27:09.351488 TDB")
File = $messenger/kernels/spk/msgr_20040803_20150430_od431sc_2.bsp
Type = Reconstructed
End_Group
End_Object
End
Some explanations:
* ``Object = SpacecraftPosition`` specifies that this is SPK metadata.
* ``Group = Dependencies`` specifies the kernels required in order to furnish these kernels and extract meaningful data; in this case, an LSK. Usually this is an LSK or SCLK, and it sometimes includes kernels for other reference frames.
* ``Group = Selection`` specifies the criteria for selecting a particular kernel; in this case, the start and stop time range, the path of the kernel, and the kernel quality. A rough sketch of this rule follows.
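.. code-block:: python

    # Hypothetical sketch of the Selection group above: an SPK is
    # selected when the query time falls inside its time window.
    # The ET values are invented for illustration; real code would
    # convert the TDB strings with SPICE.
    kernel_windows = {
        "msgr_20040803_20150430_od431sc_2.bsp": (1.44e8, 4.84e8),  # invented ETs
    }

    def select(et):
        return [k for k, (start, stop) in kernel_windows.items()
                if start <= et <= stop]

    print(select(2.0e8))  # ['msgr_20040803_20150430_od431sc_2.bsp']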
For binary kernels, these DB files are generally not handwritten; instead, the higher-level information is written into a script, usually named ``makedb``, with information on required time kernels and regexes for the different kernel qualities and the kernel type.
**Makedb script, ``$ISISDATA/messenger/kernels/spk/makedb``**
.. code-block:: bash
# Generate kernel database from existing kernels
kerneldbgen to='kernels.????.db' type=SPK \
recondir='$messenger/kernels/spk' \
reconfilter=msgr_20040803*.bsp \
lsk='$base/kernels/lsk/naif????.tls'
Things to note:
* ``type=SPK`` describes the kernel type; this populates the object name in the output PVL, in this case ``Object = SpacecraftPosition``.
* ``lsk='$base/kernels/lsk/naif????.tls'`` describes the LSK dependencies; sometimes you also see ``sclk=<path>`` in cases where a specific SCLK is required. This populates the Dependencies group, in this case ``LeapsecondKernel = $base/kernels/lsk/naif0011.tls``.
* ``reconfilter=msgr_20040803*.bsp`` describes the pattern (using Unix globbing wildcards) for this mission's reconstructed kernels of the given kernel type (SPK). Files matching these patterns are globbed, then:
  1. kerneldbgen uses NAIF's CSPICE toolkit to compute the time ranges for the kernels.
  2. Matches for the different filters are labeled accordingly: ``predictfilter`` matches are labeled as predicted, ``reconfilter`` matches are labeled as reconstructed, etc.
  3. This process works very well, as mission kernel names are well structured (see the glob-to-regex sketch below).
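As a sketch of the glob-to-regex relationship (using Python's ``fnmatch`` purely for illustration; SpiceQL config files store the regexes directly):

.. code-block:: python

    import fnmatch
    import re

    # The makedb filters are Unix globs; SpiceQL config files use
    # ECMAScript-style regexes. fnmatch.translate shows the
    # equivalence for the Messenger reconfilter above.
    regex = fnmatch.translate("msgr_20040803*.bsp")

    kernels = ["msgr_20040803_20150430_od431sc_2.bsp", "naif0011.tls"]
    print([k for k in kernels if re.match(regex, k)])
    # ['msgr_20040803_20150430_od431sc_2.bsp']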
**Text Kernel file, $ISISDATA/mro/kernels/ik/kernels.0003.db**
.. code-block:: text
Object = Instrument
Group = Selection
Match = ("Instrument","InstrumentId","Marci")
File = ("mro", "kernels/ik/mro_marci_v??.ti")
EndGroup
Group = Selection
Match = ("Instrument","InstrumentId","HIRISE")
File = ("mro", "kernels/ik/mro_hirise_v??.ti")
EndGroup
Group = Selection
Match = ("Instrument","InstrumentId","CRISM")
File = ("mro", "kernels/ik/mro_crism_v??.ti")
EndGroup
EndObject
Here, as with most IK DB files, the IK selection is instrument-specific, so the ``Match`` key is used by ISIS to specify image label keywords that must match in order to select this kernel. These usually have no ``makedb`` and are handwritten, as CSPICE is not needed to extract kernel times. A sketch of this selection logic follows.
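.. code-block:: python

    # Minimal sketch of the Match rule: select the IK whose Match
    # triple agrees with the image label, mirroring the MRO DB file
    # above (data structures are illustrative, not ISIS internals).
    selections = [
        (("Instrument", "InstrumentId", "Marci"),  "kernels/ik/mro_marci_v??.ti"),
        (("Instrument", "InstrumentId", "HIRISE"), "kernels/ik/mro_hirise_v??.ti"),
        (("Instrument", "InstrumentId", "CRISM"),  "kernels/ik/mro_crism_v??.ti"),
    ]
    label = {("Instrument", "InstrumentId"): "HIRISE"}

    picked = [f for (grp, key, val), f in selections
              if label.get((grp, key)) == val]
    print(picked)  # ['kernels/ik/mro_hirise_v??.ti']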
The last, more complex example is one where kernel selection requires multiple kernels from the same kernel type; this is used in situations where a particular kernel has time-based dependencies.
In this example, we use Messenger's MDIS. MDIS was mounted on a gimbal, so to get correct pointing information for MDIS relative to the Messenger spacecraft, the following frame chain is required:
.. code-block:: text
Messenger Spacecraft -> Gimbal -> MDIS
Therefore, we need one CK kernel each for the spacecraft, the gimbal, and MDIS. ISIS accomplishes this by using three different kernel DB files: ``messenger_kernels.[0-9]{4}.db``, ``messenger_mdis_att.[0-9]{4}.db``, and ``mdis_kernels.[0-9]{4}.db``, generated with separate calls to kerneldbgen. A handwritten configuration file, called ``kernels.[0-9]{4}.conf``, is used to describe their relationships.
**Conf file for Messenger, $ISISDATA/messenger/kernels/ck/kernels.0001.conf**
.. code-block:: text
Object = Instrument
Group = Selection
Match = ("Instrument","InstrumentId","MDIS-WAC")
File = ("messenger", "kernels/ck/mdis_kernels.????.db")
File = ("messenger", "kernels/ck/messenger_mdis_att_kernels.????.db")
File = ("messenger", "kernels/ck/messenger_kernels.????.db")
End_Group
Group = Selection
Match = ("Instrument","InstrumentId","MDIS-NAC")
File = ("messenger", "kernels/ck/mdis_kernels.????.db")
File = ("messenger", "kernels/ck/messenger_mdis_att_kernels.????.db")
File = ("messenger", "kernels/ck/messenger_kernels.????.db")
End_Group
End_Object
End
**makedb file for Messenger, $ISISDATA/messenger/kernels/ck/makedb**
.. code-block:: bash
# Generate kernel database from existing kernels
kerneldbgen to='messenger_kernels.????.db' type=CK \
recondir='$messenger/kernels/ck' \
reconfilter='msgr_????_v??.bc' \
smitheddir='$messenger/kernels/ck' \
smithedfilter='msgr_mdis_sc????_usgs_v?.bc' \
sclk='$messenger/kernels/sclk/messenger_????.tsc' \
lsk='$base/kernels/lsk/naif????.tls'
kerneldbgen to='messenger_mdis_att_kernels.????.db' type=CK \
recondir='$messenger/kernels/ck' \
reconfilter='msgr_mdis_sc??????_??????_sub_v?.bc' \
sclk='$messenger/kernels/sclk/messenger_????.tsc' \
lsk='$base/kernels/lsk/naif????.tls'
kerneldbgen to='mdis_kernels.????.db' type=CK \
recondir='$messenger/kernels/ck' \
reconfilter='msgr_mdis_gm??????_??????v?.bc' \
sclk='$messenger/kernels/sclk/messenger_????.tsc' \
lsk='$base/kernels/lsk/naif????.tls'
Here, you can see how dependencies on other CK kernels are expressed. As MDIS-WAC and MDIS-NAC have the same dependencies, the two groups are identical.
Everything here, except kernel start and stop times, requires a knowledgeable individual to manually label kernel qualities and dependencies. Times are cached by kerneldbgen_.
Translating .db/makedb files to a single configuration file
************************************************************
.. warning:: As this library is a work in progress, much of this is subject to change. With so many missions with different requirements, it is very possible that this format fails to support specific requirements for some missions. If you have any suggested changes for this process, feel free to open an issue and start a discussion.
**Example Configuration File:**
Here, we'll run through an example config file, again for Messenger, encapsulating a lot of the information put into ISIS between the .db and makedb files.
.. literalinclude:: ../../SpiceQL/db/mess.json
:language: JSON
:linenos:
Things to note:
* the JSON format mimics ISIS's directory structure for kernels, with the tree:
.. code-block:: shell
<mission>.json
└── "<Frame>"
├── "<Binary Kernel Type>:"
│ └── "<Kernel Quality>:"
│ └── <list of regexes>
├── "<Text Kernel Type>:"
│ └── <list of regexes>
└── "deps:"
└── <list of json pointers>
* Kernel formats now use ECMAScript_ (i.e. regular expressions).
* No times are cached; these are simply computed at runtime.
* To resolve frame chains and dependencies on other JSON keywords, the deps object can be an arbitrary pointer to another JSON block which this frame depends on for SCLKs, CKs, etc. (see the sketch below).
* Instead of configs going into a black box that resolves kernels per image, running a query returns JSON matching this format.
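A minimal sketch of resolving such a deps pointer (the config snippet and resolver below are illustrative, not the contents of mess.json or the SpiceQL implementation):

.. code-block:: python

    # Resolve a "deps" JSON pointer against a mission config whose
    # shape mirrors the tree above (keys and regexes invented).
    config = {
        "mdis": {
            "ck": {"reconstructed": ["msgr_mdis_gm\\d{6}_\\d{6}v\\d\\.bc"]},
            "deps": ["/mess/sclk"],
        },
        "mess": {"sclk": ["messenger_\\d{4}\\.tsc"]},
    }

    def resolve(pointer, doc):
        node = doc
        for part in pointer.strip("/").split("/"):
            node = node[part]
        return node

    for dep in config["mdis"]["deps"]:
        print(dep, "->", resolve(dep, config))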
Example output for querying all kernels for Messenger: https://gist.github.com/Kelvinrr/3e383d62eead58c016b7165acdf64d60
The Flowchart™
**************
1. Identify the mission and create a new JSON file called ``<mission>.json``; try to stick to NAIF abbreviations (see: https://naif.jpl.nasa.gov/pub/naif/pds/data/)
2. Do binary kernels first (CKs, SPKs, etc.):
* Look at the makedb file, translate the wildcards to ECMAScript_, and place them under their respective quality keyword
* If a list of regexes (or any list) has only one element, feel free to write it as a string, not an array
* Check for a config file and make sure the dependencies on other time-dependent kernels are handled with the ``deps`` keyword.
3. When doing text kernels:
* Simply look at the ``kernels.[0-9]{4}.db`` file (specifically the newest one) and mimic the regexes in ECMAScript_
* The key is to make sure the right text kernel goes to the right instrument. For example, IKs are often instrument-specific, but they can also be shared by all instruments on a spacecraft.
4. Test by creating a new gtest running ``utils::listMissionKernels`` and manually confirming that all kernels are accounted for. Look at FunctionalTestsSpiceQueries.cpp for some examples.
5. Add new config files to CMakeLists.txt's SPICEQL_CONFIG_FILES variable to make sure they get installed.
.. warning:: As this library is a work in progress, testing these queries isn't 100% figured out; in the long term, we will test against ISIS's kernel search and make sure we get the same response.
breathe
sphinxcontrib.gist
m2r2
sphinx_material
openapi: '3.0.2'
info:
title: SpiceQL
version: '0.3.0'
servers:
- url: https://spiceql-deployment.prod-asc.chs.usgs.gov/v030
paths:
/strSclkToEt:
get:
summary: "Converts an SCLK string to an ephemeris time"
description: "By passing in the mission and valid SCLK string, get the ephemeris time\n"
parameters:
- name: "frameCode"
in: "query"
description: "Frame code for the mission"
required: true
schema:
type: "integer"
example: -85
- name: "mission"
in: "query"
description: "NAIF name for the mission"
required: true
schema:
type: "string"
example: "lro"
- name: "sclk"
in: "query"
description: "SCLK string"
required: true
schema:
type: "string"
example: "1/281199081:48971"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:strSclkToEtlambda-prod/invocations"
requestTemplates :
"application/json": "{\"frameCode\": $input.params('frameCode'), \"mission\": \"$input.params('mission')\", \"sclk\": \"$input.params('sclk')\" }"
requestParameters : {
integration.request.querystring.frameCode: method.request.querystring.frameCode,
integration.request.querystring.mission: method.request.querystring.mission,
integration.request.querystring.sclk: method.request.querystring.sclk
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/doubleSclkToEt:
get:
summary: "Converts a given double spacecraft clock time to an ephemeris time"
description: "Given a known frame code doubleSclkToEt converts a given spacecraft clock time as a double to an ephemeris time. Call this function if your clock time looks something like: 922997380.174174"
parameters:
- name: "frameCode"
in: "query"
description: "Frame code for the mission"
required: true
schema:
type: "integer"
example: -131
- name: "mission"
in: "query"
description: "NAIF name for the mission"
required: true
schema:
type: "string"
example: "kaguya"
- name: "sclk"
in: "query"
description: "SCLK double"
required: true
schema:
type: "number"
format: "double"
example: "922997380.174174"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:doubleSclkToEtlambda-prod/invocations"
requestTemplates :
"application/json": "{\"frameCode\": $input.params('frameCode'), \"mission\": \"$input.params('mission')\", \"sclk\": $input.params('sclk') }"
requestParameters : {
integration.request.querystring.frameCode: method.request.querystring.frameCode,
integration.request.querystring.mission: method.request.querystring.mission,
integration.request.querystring.sclk: method.request.querystring.sclk
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/utcToEt:
get:
summary: "Returns list of missions that are currently supported"
description: "By passing in UTC string, get the ephemeris time\n"
parameters:
- name: "utc"
in: "query"
description: "UTC string"
required: true
schema:
type: "string"
format: "date-time"
example: "2016-11-26 22:32:14.582000"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:utcToEtlambda-prod/invocations"
requestTemplates :
"application/json": "{\"utc\": \"$input.params('utc')\"}"
requestParameters : {
integration.request.querystring.utc: method.request.querystring.utc
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/translateNameToCode:
get:
description: "Switch between NAIF frame string name to integer frame code"
parameters:
- name: "frame"
in: "query"
description: "String to translate to a NAIF code"
required: true
schema:
type: "string"
example: "LRO_LROCWAC"
- name: "mission"
in: "query"
description: "Mission name as it relates to the config files"
required: false
schema:
type: "string"
example: "lro"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:translateNameToCodelambda-prod/invocations"
requestTemplates :
"application/json": "{\"frame\": \"$input.params('frame')\", \"mission\":\"$input.params('mission')\"}"
requestParameters : {
integration.request.querystring.frame: method.request.querystring.frame,
integration.request.querystring.mission: method.request.querystring.mission
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/translateCodeToName:
get:
description: "Switch between NAIF frame integer code to string frame name"
parameters:
- name: "frameCode"
in: "query"
description: "int NAIF frame code to translate"
required: true
schema:
type: "integer"
example: -85
- name: "mission"
in: "query"
description: "Mission name as it relates to the config files"
required: false
schema:
type: "string"
example: "lro"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:translateCodeToNamelambda-prod/invocations"
requestTemplates :
"application/json": "{\"frame\": $input.params('frameCode'), \"mission\":\"$input.params('mission')\"}"
requestParameters : {
integration.request.querystring.frame: method.request.querystring.frameCode,
integration.request.querystring.mission: method.request.querystring.mission
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/getTargetStates:
get:
description: 'Get states of target relative to observer for each provided time'
parameters:
- name: "ets"
in: "query"
description: "All ephemeris times to get states for"
required: true
schema:
type: "array"
items:
type: "number"
format: "double"
- name: "target"
in: "query"
description: "Target to get the state of"
required: true
schema:
type: "string"
- name: "observer"
in: "query"
description: "Observing body that the target state will be relative to"
required: true
schema:
type: "string"
- name: "frame"
in: "query"
description: "Output reference frame to be used"
required: true
schema:
type: "string"
- name: "abcorr"
in: "query"
description: "Aberration correction"
required: true
schema:
type: "string"
- name: "mission"
in: "query"
description: "Mission name as it relates to the config files"
required: true
schema:
type: "string"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:getTargetStateslambda-prod/invocations"
requestTemplates :
"application/json": "{\"ets\": $input.params('ets'), \"target\":\"$input.params('target')\", \"observer\":\"$input.params('observer')\", \"frame\":\"$input.params('frame')\", \"abcorr\":\"$input.params('abcorr')\", \"mission\":\"$input.params('mission')\"}"
requestParameters : {
integration.request.querystring.ets: method.request.querystring.ets,
integration.request.querystring.target: method.request.querystring.target,
integration.request.querystring.observer: method.request.querystring.observer,
integration.request.querystring.frame: method.request.querystring.frame,
integration.request.querystring.abcorr: method.request.querystring.abcorr,
integration.request.querystring.mission: method.request.querystring.mission
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/findMissionKeywords:
get:
description: 'Takes in a kernel key and returns the value associated with the given mission as JSON'
parameters:
- name: "key"
in: "query"
description: "key - Kernel to get values from "
required: true
schema:
type: "string"
example: "INS-236820_CCD_CENTER"
- name: "mission"
in: "query"
description: "Mission name as it relates to the config files"
required: true
schema:
type: "string"
example: "mdis"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:findMissionKeywordslambda-prod/invocations"
requestTemplates :
"application/json": "{\"key\": \"$input.params('key')\", \"mission\":\"$input.params('mission')\"}"
requestParameters : {
integration.request.querystring.key: method.request.querystring.key,
integration.request.querystring.mission: method.request.querystring.mission
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/getTargetValues:
get:
description: 'Takes in a target key and returns the associated values as a vector.'
parameters:
- name: "target"
in: "query"
description: "Target to get values of"
required: true
schema:
type: "string"
example: "Earth"
- name: "key"
in: "query"
description: "Keyword for desired values"
required: true
schema:
type: "string"
example: "RADII"
- name: "mission"
in: "query"
description: "Mission name as it relates to the config files"
required: false
schema:
type: "string"
example: "kaguya"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:getTargetValueslambda-prod/invocations"
requestTemplates :
"application/json": "{\"target\":\"$input.params('target')\", \"key\":\"$input.params('key')\", \"mission\":\"$input.params('mission')\"}"
requestParameters : {
integration.request.querystring.target: method.request.querystring.target,
integration.request.querystring.key: method.request.querystring.key,
integration.request.querystring.mission: method.request.querystring.mission
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/getFrameInfo:
get:
description: 'Get information on a given NAIF frame code'
parameters:
- name: "frameCode"
in: "query"
description: "int NAIF frame code to get information on"
required: true
schema:
type: "integer"
example: -85000
- name: "mission"
in: "query"
description: "Mission name as it relates to the config files"
required: true
schema:
type: "string"
example: "lroc"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:getFrameInfolambda-prod/invocations"
requestTemplates :
"application/json": "{\"frame\": $input.params('frameCode'), \"mission\":\"$input.params('mission')\"}"
requestParameters : {
integration.request.querystring.frameCode: method.request.querystring.frameCode,
integration.request.querystring.mission: method.request.querystring.mission
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
/getTargetFrameInfo:
get:
description: 'Takes in a target id and returns the frame name and frame code in json format.'
parameters:
- name: "targetId"
in: "query"
description: "Target ID"
required: true
schema:
type: integer
example: 499
- name: "mission"
in: "query"
description: "Mission name as it relates to the config files"
required: false
schema:
type: "string"
example: "mro"
responses:
'200':
description: OK
x-amazon-apigateway-integration:
type: aws
httpMethod: POST
uri : "arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:950438895271:function:getTargetFrameInfolambda-prod/invocations"
requestTemplates :
"application/json": "{\"targetId\": $input.params('targetId'), \"mission\":\"$input.params('mission')\"}"
requestParameters : {
integration.request.querystring.targetId: method.request.querystring.targetId,
integration.request.querystring.mission: method.request.querystring.mission
}
responses:
default:
statusCode: '200'
responseTemplates:
"application/json": "'$input.body'"
@@ -20,9 +20,8 @@ dependencies:
   - cereal
   - spdlog
   - pip:
-    - breathe
-    - sphinx-material
-    - m2r2
-    - sphinxcontrib.gist
+    - mkdocs
+    - mkdocs-swagger-ui-tag
+    - mkdoxy
     - check-jsonschema
+    - mkdocs-simple-blog
site_name: "SpiceQL Manual"
theme:
name: simple-blog
icon:
annotation: material/chevron-right-circle
font:
text: Inter
code: Roboto Mono
colors:
primary: blue
title: black
features:
- navigation.tabs
- navigation.path
- navigation.indexes
- search.suggest
palette:
# Palette toggle for dark mode
- media: "(prefers-color-scheme: dark)"
scheme: slate
primary: yellow
accent: blue
toggle:
icon: material/toggle-switch-off-outline
name: Switch to light mode
# Palette toggle for light mode
- media: "(prefers-color-scheme: light)"
scheme: default
primary: yellow
accent: blue
toggle:
icon: material/toggle-switch
name: Switch to dark mode
nav:
- Home: index.md
- C++ API:
- SpiceQL C++ API:
- 'Links': 'SpiceQLCPPAPI/links.md'
- 'Classes':
- 'Class List': 'SpiceQLCPPAPI/annotated.md'
- 'Class Index': 'SpiceQLCPPAPI/classes.md'
- 'Class Hierarchy': 'SpiceQLCPPAPI/hierarchy.md'
- 'Class Members': 'SpiceQLCPPAPI/class_members.md'
- 'Class Member Functions': 'SpiceQLCPPAPI/class_member_functions.md'
- 'Class Member Variables': 'SpiceQLCPPAPI/class_member_variables.md'
- 'Class Member Typedefs': 'SpiceQLCPPAPI/class_member_typedefs.md'
- 'Class Member Enumerations': 'SpiceQLCPPAPI/class_member_enums.md'
- 'Namespaces':
- 'Namespace List': 'SpiceQLCPPAPI/namespaces.md'
- 'Namespace Members': 'SpiceQLCPPAPI/namespace_members.md'
- 'Namespace Member Functions': 'SpiceQLCPPAPI/namespace_member_functions.md'
- 'Namespace Member Variables': 'SpiceQLCPPAPI/namespace_member_variables.md'
- 'Namespace Member Typedefs': 'SpiceQLCPPAPI/namespace_member_typedefs.md'
- 'Namespace Member Enumerations': 'SpiceQLCPPAPI/namespace_member_enums.md'
- 'Variables': 'SpiceQLCPPAPI/variables.md'
- 'Macros': 'SpiceQLCPPAPI/macros.md'
- 'Files': 'SpiceQLCPPAPI/files.md'
- RESTful API:
- SpiceQL RESTful API:
- 'Home': 'RestAPI.md'
plugins:
- search
- swagger-ui-tag
- mkdoxy:
projects:
SpiceQLCPPAPI:
src-dirs: SpiceQL/src/ SpiceQL/include/
full-doc: True # if you want to generate full documentation
doxy-cfg: # standard doxygen configuration (key: value)
FILE_PATTERNS: "*.cpp *.h*" # specify file patterns to filter out
RECURSIVE: True # recursive search in source directories