diff --git a/SpiceQL/include/inventory.h b/SpiceQL/include/inventory.h
index b6f27347d2335d8cb62f1597596b884b2c231b70..dd6d499d474099f525379c316c7e41d7034b246a 100644
--- a/SpiceQL/include/inventory.h
+++ b/SpiceQL/include/inventory.h
@@ -18,6 +18,8 @@ namespace SpiceQL {
     namespace Inventory { 
         nlohmann::json search_for_kernelset(std::string spiceql_mission, std::vector<std::string> types, double start_time=-std::numeric_limits<double>::max(), double stop_time=std::numeric_limits<double>::max(), 
                                       std::string ckQuality="smithed", std::string spkQuality="smithed", bool enforce_quality=false);
+        
+        std::string getDbFilePath();
 
         void create_database(); 
     }    
diff --git a/SpiceQL/src/inventory.cpp b/SpiceQL/src/inventory.cpp
index 18007c50eac08f755a81f19eadf63e88a6089b00..507eb4381c3f40901176c251f83064bb194b4a0d 100644
--- a/SpiceQL/src/inventory.cpp
+++ b/SpiceQL/src/inventory.cpp
@@ -4,6 +4,7 @@
 
 #include <nlohmann/json.hpp>
 #include <spdlog/spdlog.h>
+#include <ghc/fs_std.hpp>
 
 #include "inventory.h"
 #include "inventoryimpl.h"
@@ -26,7 +27,12 @@ namespace SpiceQL {
 
             return impl.search_for_kernelset(instrument, enum_types, start_time, stop_time, enum_ck_quality, enum_spk_quality, enforce_quality);
         }
-
+        
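+        // full path to the HDF5 inventory database inside the SpiceQL cache directory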
+        string getDbFilePath() { 
+            static std::string db_path = (fs::path(getCacheDir()) / DB_HDF_FILE).string();
+            return db_path;
+        }
+ 
         void create_database() { 
             // force generate the database
             InventoryImpl db(true);
diff --git a/SpiceQL/src/inventoryimpl.cpp b/SpiceQL/src/inventoryimpl.cpp
index 4f3b4e2e928e4ebf3f53a0b9850d986f36ab8ad5..05cd963dac1e89e25d89d418044d5644b5f6e5c3 100644
--- a/SpiceQL/src/inventoryimpl.cpp
+++ b/SpiceQL/src/inventoryimpl.cpp
@@ -106,8 +106,8 @@ namespace SpiceQL {
           fc::BTreePair<double, size_t> p;
           p.first = sstimes.first; 
           p.second = index;
-          if(kernel_times->start_times.contains(p.first)) { 
-            p.first-=0.0000001; 
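+          // decrement duplicate start times until the key is unique in the index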
+          while(kernel_times->start_times.contains(p.first)) { 
+            p.first-=0.001; 
           } 
           kernel_times->start_times.insert(p);
 
@@ -115,8 +115,8 @@ namespace SpiceQL {
           p2.first = sstimes.second; 
           p2.second = index; 
 
-          if(kernel_times->stop_times.contains(p2.first)) { 
-            p2.first+=0.0000001; 
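+          // increment duplicate stop times until the key is unique in the index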
+          while(kernel_times->stop_times.contains(p2.first)) { 
+            p2.first+=0.001; 
           }  
           kernel_times->stop_times.insert(p2);
 
@@ -280,14 +280,12 @@ namespace SpiceQL {
               vector<string> file_paths_v = getKey<vector<string>>(DB_SPICE_ROOT_KEY+"/"+key+"/"+DB_TIME_FILES_KEY); 
 
               time_indices->file_paths = file_paths_v;
-              
+              SPDLOG_TRACE("Index, start time, stop time sizes: {}, {}, {}", file_index_v.size(), start_times_v.size(), stop_times_v.size());
               // load start_times 
               for(size_t i = 0; i < start_times_v.size(); i++) {
                 time_indices->start_times[start_times_v[i]] = file_index_v[i];
                 time_indices->stop_times[stop_times_v[i]] = file_index_v[i];
               }
-
-              found = true;
             }
             catch (runtime_error &e) { 
               // should probably replace with a more specific exception 
@@ -295,53 +293,61 @@ namespace SpiceQL {
               continue;
             }
           }
-          if (enforce_quality) break; // only interate once if quality is enforced 
-        }
 
-        if (time_indices) { 
-          SPDLOG_TRACE("NUMBER OF KERNELS: {}", time_indices->file_paths.size());
-          SPDLOG_TRACE("NUMBER OF START TIMES: {}", time_indices->start_times.size());
-          SPDLOG_TRACE("NUMBER OF STOP TIMES: {}", time_indices->stop_times.size()); 
-        } else { 
-          // no kernels found 
-          continue;
-        }
-        size_t iterations = 0; 
+          if (time_indices) { 
+            SPDLOG_TRACE("NUMBER OF KERNELS: {}", time_indices->file_paths.size());
+            SPDLOG_TRACE("NUMBER OF START TIMES: {}", time_indices->start_times.size());
+            SPDLOG_TRACE("NUMBER OF STOP TIMES: {}", time_indices->stop_times.size()); 
+          } else { 
+            // no kernels found 
+            continue;
+          }
+  
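+          // iteration counter, used only for the trace logging below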
+          size_t iterations = 0; 
       
-        // init containers
-        unordered_set<size_t> start_time_kernels; 
-        vector<string> final_time_kernels;
-
-        // Get everything starting before the stop_time; 
-        auto start_upper_bound = time_indices->start_times.upper_bound(stop_time);
-        for(auto it = time_indices->start_times.begin() ;it != start_upper_bound; it++) {
-          iterations++;
-          start_time_kernels.insert(it->second);             
-        }
+          // init containers
+          unordered_set<size_t> start_time_kernels; 
+          vector<string> final_time_kernels;
+
+          // Get everything starting before the stop_time; 
+          auto start_upper_bound = time_indices->start_times.upper_bound(stop_time);
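+          // edge case: if the upper bound is the very first entry, count it as a start-time match when it starts at or before start_time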
+          if(start_upper_bound == time_indices->start_times.begin() && start_upper_bound != time_indices->start_times.end() && start_upper_bound->first <= start_time) { 
+            iterations++;
+            start_time_kernels.insert(start_upper_bound->second);  
+          }
+          for(auto it = time_indices->start_times.begin() ;it != start_upper_bound; it++) {
+            iterations++;
+            start_time_kernels.insert(it->second);             
+          }
 
-        SPDLOG_TRACE("NUMBER OF KERNELS MATCHING START TIME: {}", start_time_kernels.size()); 
+          SPDLOG_TRACE("NUMBER OF KERNELS MATCHING START TIME: {}", start_time_kernels.size()); 
 
-        // Get everything stopping after the start_time; 
-        auto stop_lower_bound = time_indices->stop_times.lower_bound(start_time);
-        if(time_indices->stop_times.end() == stop_lower_bound && start_time_kernels.contains(stop_lower_bound->second)) { 
-          final_time_kernels.push_back(time_indices->file_paths.at(stop_lower_bound->second));
-        }
-        else { 
-          for(auto &it = stop_lower_bound;it != time_indices->stop_times.end(); it++) { 
-            // if it's also in the start_time set, add it to the list
-            iterations++;
-            
-            if (start_time_kernels.contains(it->second)) {
-              final_time_kernels.push_back(data_dir / time_indices->file_paths.at(it->second));
-            }
-          } 
-        }
-        if (final_time_kernels.size()) { 
-          kernels[Kernel::translateType(type)] = final_time_kernels;
-          kernels[qkey] = Kernel::translateQuality(quality);
+          // Get everything stopping after the start_time; 
+          auto stop_lower_bound = time_indices->stop_times.lower_bound(start_time);
+          SPDLOG_TRACE("IS {} in the array? {}", stop_lower_bound->second, start_time_kernels.contains(stop_lower_bound->second)); 
+          if(time_indices->stop_times.end() == stop_lower_bound && stop_lower_bound->first >= stop_time && start_time_kernels.contains(stop_lower_bound->second)) { 
+            final_time_kernels.push_back(time_indices->file_paths.at(stop_lower_bound->second));
+          }
+          else { 
+            for(auto &it = stop_lower_bound;it != time_indices->stop_times.end(); it++) { 
+              // if it's also in the start_time set, add it to the list
+              iterations++;
+              SPDLOG_TRACE("IS {} in the array? {}", it->second, start_time_kernels.contains(it->second)); 
+              if (start_time_kernels.contains(it->second)) {
+                final_time_kernels.push_back(data_dir / time_indices->file_paths.at(it->second));
+              }
+            } 
+          }
+          if (final_time_kernels.size()) { 
+            found = true;
+            kernels[Kernel::translateType(type)] = final_time_kernels;
+            kernels[qkey] = Kernel::translateQuality(quality);
+          }
+          SPDLOG_TRACE("NUMBER OF ITERATIONS: {}", iterations);
+          SPDLOG_TRACE("NUMBER OF KERNELS FOUND: {}", final_time_kernels.size());  
+          
+          if (enforce_quality) break; // only iterate once if quality is enforced 
         }
-        SPDLOG_TRACE("NUMBER OF ITERATIONS: {}", iterations);
-        SPDLOG_TRACE("NUMBER OF KERNELS FOUND: {}", final_time_kernels.size());  
       }
       else { // text/non time based kernels
         SPDLOG_DEBUG("Trying to search time independant kernels");
diff --git a/SpiceQL/tests/Fixtures.cpp b/SpiceQL/tests/Fixtures.cpp
index ecd36937affe9621a9d8ba3eac9469bcb807aaef..c9955257c96e225b2a61e487e8ba318fa86f2bca 100644
--- a/SpiceQL/tests/Fixtures.cpp
+++ b/SpiceQL/tests/Fixtures.cpp
@@ -54,9 +54,9 @@ void TempTestingFiles::SetUp() {
 
 
 void TempTestingFiles::TearDown() {
-    if(!fs::remove_all(tempDir)) {
-      throw runtime_error("Could not delete temporary files");
-    }
+    // if(!fs::remove_all(tempDir)) {
+    //   throw runtime_error("Could not delete temporary files");
+    // }
 }
 
 
@@ -171,6 +171,46 @@ void IsisDataDirectory::CompareKernelSets(vector<string> kVector, vector<string>
   }
 }
 
+void KernelsWithQualities::SetUp() { 
+  root = getenv("SPICEROOT");
+
+  fs::create_directory(root / "spk");
+
+  // we are using Mars Odyssey here 
+  int bodyCode = -83000;
+  std::string referenceFrame = "j2000"; 
+
+  std::vector<double> times1 = {110000000, 120000000};
+  std::vector<double> times2 = {130000000, 140000000};
+
+  // create predicted SPK 
+
+  std::vector<std::vector<double>> velocities = {{1,1,1}, {2,2,2}};
+  std::vector<std::vector<double>> positions = {{1, 1, 1}, {2, 2, 2}};
+  spkPathPredict = root / "spk" / "m01_map.bsp";
+  writeSpk(spkPathPredict, positions, times1, bodyCode, 1, referenceFrame, "SPK ID 1", 1, velocities, "SPK 1");
+
+  // create reconstructed SPK  
+  spkPathRecon = root / "spk" / "m01_ab_v2.bsp";
+
+  writeSpk(spkPathRecon, positions, times1, bodyCode, 1, referenceFrame, "SPK ID 1", 1, velocities, "SPK 1"); 
+
+  // create another reconstructed SPK with different times 
+  spkPathRecon2 = root / "spk" / "m01_map_rec.bsp";
+
+  writeSpk(spkPathRecon2, positions, times2, bodyCode, 1, referenceFrame, "SPK ID 1", 1, velocities, "SPK 1"); 
+
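+  // create smithed SPK 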
+  spkPathSmithed = root / "spk" / "themis_dayir_merged_2018Jul13_spk.bsp";
+  writeSpk(spkPathSmithed, positions, times1, bodyCode, 1, referenceFrame, "SPK ID 1", 1, velocities, "SPK 1"); 
+  
+  Inventory::create_database();
+}
+
+
+void KernelsWithQualities::TearDown() {
+
+}
+
 
 void LroKernelSet::SetUp() {
   root = getenv("SPICEROOT");
diff --git a/SpiceQL/tests/Fixtures.h b/SpiceQL/tests/Fixtures.h
index 5eac58ba67033ace193dd18f7de0e342e7c42953..2c8ac1dbb7ce106832e26473c9913a8301c52d2c 100644
--- a/SpiceQL/tests/Fixtures.h
+++ b/SpiceQL/tests/Fixtures.h
@@ -66,6 +66,18 @@ class LroKernelSet : public ::testing::Test  {
     void TearDown() override;
 };
 
+class KernelsWithQualities : public ::testing::Test  {
+  protected:
+    fs::path root;
+    string spkPathPredict; 
+    string spkPathRecon; 
+    string spkPathRecon2; 
+    string spkPathSmithed; 
+
+    void SetUp() override;
+    void TearDown() override;
+};
+
 class TestConfig : public KernelDataDirectories {
   protected:
 
diff --git a/SpiceQL/tests/InventoryTests.cpp b/SpiceQL/tests/InventoryTests.cpp
index f39273b18e08622197527ca0a8dbe1cb65791f8a..b48012682731f1c28cfd2203ca461f70d3f6256b 100644
--- a/SpiceQL/tests/InventoryTests.cpp
+++ b/SpiceQL/tests/InventoryTests.cpp
@@ -6,20 +6,20 @@
 #include "Fixtures.h"
 
 #include "inventory.h"
+#include "inventoryimpl.h"
 
 #include <spdlog/spdlog.h>
-
+#include <highfive/highfive.hpp>
 
 TEST_F(LroKernelSet, TestInventorySmithed) { 
   Inventory::create_database();
-  nlohmann::json kernels = Inventory::search_for_kernelset("lroc", {"fk", "sclk", "spk", "ck"}, 110000000, 130000001);
+  nlohmann::json kernels = Inventory::search_for_kernelset("lroc", {"fk", "sclk", "spk", "ck"}, 110000000, 140000000);
   EXPECT_EQ(fs::path(kernels["fk"][0]).filename(), "lro_frames_1111111_v01.tf");
   EXPECT_EQ(fs::path(kernels["sclk"][0]).filename(), "lro_clkcor_2020184_v00.tsc");
   EXPECT_EQ(fs::path(kernels["ck"][0]).filename(), "soc31_1111111_1111111_v21.bc"); 
   
   EXPECT_EQ(kernels["spk"].size(), 3);
-  EXPECT_EQ(kernels["ck"].size(), 2);
-
+  
   EXPECT_EQ(kernels["ckQuality"], "reconstructed");  
   EXPECT_EQ(kernels["spkQuality"], "smithed");  
 }
@@ -51,3 +51,35 @@ TEST_F(LroKernelSet, TestInventoryEmpty) {
   EXPECT_TRUE(kernels.empty());
 }
 
+
+TEST_F(LroKernelSet, TestInventoryPortability) { 
+  fs::path dbfile = Inventory::getDbFilePath();
+  HighFive::File file(dbfile, HighFive::File::ReadOnly);
+  
+  auto dataset = file.getDataSet("spice/lroc/sclk/kernels");
+  vector<string> data = dataset.read<vector<string>>();   
+  dataset.read(data);
+
+  // assert that the path in the db is relative 
+  EXPECT_EQ(data.at(0), "clocks/lro_clkcor_2020184_v00.tsc");
+
+  nlohmann::json kernels = Inventory::search_for_kernelset("lroc", {"fk", "sclk", "spk", "ck"});
+
+  // these paths should be expanded
+  EXPECT_TRUE(kernels["sclk"][0].get<string>().size() > data.at(0).size());
+}
+
+
+TEST_F(KernelsWithQualities, TestUnenforcedQuality) { 
+  nlohmann::json kernels = Inventory::search_for_kernelset("odyssey", {"spk"}, 130000000, 140000000, "smithed", "smithed", false);
+  // smithed kernels should not exist so it should return reconstructed
+  EXPECT_EQ(kernels["spkQuality"].get<string>(), "reconstructed");
+}
+
+
+TEST_F(KernelsWithQualities, TestEnforcedQuality) { 
+  nlohmann::json kernels = Inventory::search_for_kernelset("odyssey", {"spk"}, 130000000, 140000000, "smithed", "smithed", true);
+  // Should be empty since we are enforcing smithed
+  EXPECT_TRUE(kernels.is_null());
+}
+