diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..7508c06351deb3b0fefebec84fee5b8329c6f550
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,51 @@
+# Changelog
+
+All changes that impact users of this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+<!---
+This document is intended for users of the applications and API. Changes to things
+like tests should not be noted in this document.
+
+When updating this file for a PR, add an entry for your change under Unreleased
+and one of the following headings:
+ - Added - for new features.
+ - Changed - for changes in existing functionality.
+ - Deprecated - for soon-to-be removed features.
+ - Removed - for now removed features.
+ - Fixed - for any bug fixes.
+ - Security - in case of vulnerabilities.
+
+If the heading does not yet exist under Unreleased, then add it as a 3rd heading,
+with three #.
+
+
+When preparing for a public release candidate, add a new 2nd heading, with two #, under
+Unreleased with the version number and the release date, in year-month-day
+format. Then, add a link for the new version at the bottom of this document and
+update the Unreleased link so that it compares against the latest release tag.
+
+
+When preparing for a bug fix release, create a new 2nd heading above the Fixed
+heading to indicate that only the bug fixes and security fixes are in the bug
+fix release.
+-->
+
+## [0.6.0]
+
+### Added
+- Abstract DEM interface that supports ellipsoids #576
+- Fourier-Mellin subpixel registration #558
+
+### Changed
+- Updated the center-of-gravity style subpixel matcher to use `scipy.ndimage.center_of_mass` for determining subpixel shifts
+- Use `kalasiris` instead of `pysis` #573
+- Moved `acn_submit` cluster submission script into the library and added tests for cluster submission #567
+- Point identifier no longer needs to be a unique string
+
+### Fixed
+- Image-to-ground now supports multiple input types with proper output type handling #580
+- Support for ISIS special pixels in image data #577
+- Fix for no correlation map returned from `geom_match_simple` #556
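To make the comment's workflow concrete, here is a minimal sketch of the intended structure; the version number, date, and issue numbers are hypothetical:

```markdown
## [Unreleased]

### Added
- A new feature awaiting release #601

## [0.6.1] - 2021-01-15

### Fixed
- A bug fix shipped in a patch release #600
```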
diff --git a/autocnet/graph/asynchronous_funcs.py b/autocnet/graph/asynchronous_funcs.py
index 38b4cdbcafa2f687689a7b9660976da2fdcab6c9..f7f8e05d4f7da2f4caabe2d55f217cb107519a0e 100644
--- a/autocnet/graph/asynchronous_funcs.py
+++ b/autocnet/graph/asynchronous_funcs.py
@@ -7,7 +7,7 @@ from sqlalchemy.sql.expression import bindparam
 from autocnet.io.db.model import Points, Measures
 from autocnet.utils.serializers import object_hook
 
-def watch_insert_queue(queue, queue_name, counter_name, engine, stop_event):
+def watch_insert_queue(queue, queue_name, counter_name, engine, stop_event, sleep_time=5):
     """
     A worker process to be launched in a thread that will asynchronously insert or update 
     objects in the Session using dicts pulled from a redis queue. Using this queuing approach
@@ -98,9 +98,9 @@ def watch_insert_queue(queue, queue_name, counter_name, engine, stop_event):
                 measures = [measure for sublist in measures for measure in sublist]
                 conn.execute(
                     insert(Measures.__table__), measures)
-        time.sleep(5)
+        time.sleep(sleep_time)
 
-def watch_update_queue(queue, queue_name, counter_name, engine, stop_event):
+def watch_update_queue(queue, queue_name, counter_name, engine, stop_event, sleep_time=5):
     """
     A worker process to be launched in a thread that will asynchronously insert or update 
     objects in the Session using dicts pulled from a redis queue. Using this queuing approach
@@ -173,4 +173,4 @@ def watch_update_queue(queue, queue_name, counter_name, engine, stop_event):
                     stmt, measures
                 )
 
-    time.sleep(5)
+    time.sleep(sleep_time)
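A hedged usage sketch of the new `sleep_time` parameter; the queue/counter names and database DSN below are hypothetical, not values used by the library:

```python
import threading

import redis
from sqlalchemy import create_engine

from autocnet.graph.asynchronous_funcs import watch_insert_queue

engine = create_engine('postgresql://user:password@localhost/autocnet')  # hypothetical DSN
queue = redis.StrictRedis(host='localhost', port=6379)
stop_event = threading.Event()

# Poll the redis queue every second rather than the formerly hard-coded 5 seconds.
watcher = threading.Thread(target=watch_insert_queue,
                           args=(queue, 'point_insert_queue', 'point_insert_counter',
                                 engine, stop_event),
                           kwargs={'sleep_time': 1},
                           daemon=True)
watcher.start()

# ... enqueue inserts ...
stop_event.set()  # signal the watcher to exit
```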
diff --git a/autocnet/graph/network.py b/autocnet/graph/network.py
index 1354e1f00201d5b039486f6aa6d512900809a018..1c19e19edc22b7f48f62e53200d0600a9e2f8209 100644
--- a/autocnet/graph/network.py
+++ b/autocnet/graph/network.py
@@ -2064,7 +2064,7 @@ class NetworkCandidateGraph(CandidateGraph):
                 else:
                     continue
 
-    def add_from_remote_database(self, source_db_config, path,  query_string='SELECT * FROM public.images LIMIT 10'):
+    def add_from_remote_database(self, source_db_config, path=None,  query_string='SELECT * FROM public.images LIMIT 10'):
         """
         This is a constructor that takes an existing database containing images and sensors,
         copies the selected rows into the project specified in the autocnet_config variable,
@@ -2089,7 +2089,8 @@ class NetworkCandidateGraph(CandidateGraph):
                The PATH to which images in the database specified in the config
                will be copied to. This method duplicates the data and copies it
                to a user defined PATH to avoid issues with updating image ephemeris
-               across projects.
+               across projects. When path is None (the default), the image data
+               are not copied.
 
         query_string : str
                        An optional string to select a subset of the images in the
@@ -2130,7 +2131,8 @@ class NetworkCandidateGraph(CandidateGraph):
         sourcesession.close()
 
         # Create the graph, copy the images, and compute the overlaps
-        self.copy_images(path)
+        if path:
+            self.copy_images(path)
         self.from_database()
         self._execute_sql(compute_overlaps_sql)
 
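A sketch of calling the relaxed constructor; the config keys shown are illustrative and may not match the exact schema the library expects:

```python
from autocnet.graph.network import NetworkCandidateGraph

source_db_config = {'username': 'user',      # hypothetical config keys
                    'password': 'password',
                    'host': 'localhost',
                    'port': 5432,
                    'name': 'source_project'}

ncg = NetworkCandidateGraph()
# With path=None (the new default) the database rows are copied,
# but the image files themselves are not duplicated on disk.
ncg.add_from_remote_database(source_db_config,
                             path=None,
                             query_string='SELECT * FROM public.images LIMIT 10')
```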
diff --git a/autocnet/graph/tests/test_cluster_submit.py b/autocnet/graph/tests/test_cluster_submit.py
index 32f1cd80a71068a9eace84b7c259dd2b2b995f03..bf36c0e0e7b081c727eaff2ab1ea80e87ebdf52b 100644
--- a/autocnet/graph/tests/test_cluster_submit.py
+++ b/autocnet/graph/tests/test_cluster_submit.py
@@ -1,7 +1,6 @@
 import json
 from unittest.mock import patch
 
-import fakeredis
 import numpy as np
 import pytest
 
@@ -17,10 +16,6 @@ def args():
     arg_dict = {'working_queue':'working',
                 'processing_queue':'processing'}
     return arg_dict
-            
-@pytest.fixture
-def queue():
-    return fakeredis.FakeStrictRedis()
 
 @pytest.fixture
 def simple_message():
diff --git a/autocnet/io/db/model.py b/autocnet/io/db/model.py
index 53878d295d24822a243b0a30b9540b1247a98d2a..d06b5ec216d8de98139e364015a44d7973581e31 100644
--- a/autocnet/io/db/model.py
+++ b/autocnet/io/db/model.py
@@ -413,7 +413,7 @@ class Points(Base, BaseMixin):
 
     id = Column(Integer, primary_key=True, autoincrement=True)
     _pointtype = Column("pointType", IntEnum(PointType), nullable=False)  # 2, 3, 4 - Could be an enum in the future, map str to int in a decorator
-    identifier = Column(String, unique=True)
+    identifier = Column(String)
     overlapid = Column(Integer, ForeignKey('overlay.id'))
     _geom = Column("geom", Geometry('POINT', srid=latitudinal_srid, dimension=2, spatial_index=True))
     cam_type = Column(String)
diff --git a/autocnet/matcher/ground.py b/autocnet/matcher/ground.py
index 9c8a64d766b5e27e7183372c405aec3478c9f374..0df2933bd04a2c33714f01a4b91cc2357184daf2 100644
--- a/autocnet/matcher/ground.py
+++ b/autocnet/matcher/ground.py
@@ -28,6 +28,7 @@ def propagate_ground_point(point,
                            cost=lambda x, y: y == np.max(x),
                            threshold=0.01,
                            ncg=None,
+                           preprocess=None,
                            Session=None):
     print(f'Attempting to propagate point {point.id}.')
 
@@ -47,12 +48,10 @@ def propagate_ground_point(point,
     lon = point.geom.x
     lat = point.geom.y
 
-    p = Point(lon, lat)  # lon, lat
-    new_measures = []
-
     # list of matching results in the format:
     # [measure_index, x_offset, y_offset, offset_magnitude]
-    match_results = []
+    best_correlation = -np.inf
+    best_match = None
     for _, image in images.iterrows():
         # When grounding to THEMIS the df has a PATH to the QUAD
         dest_image = GeoDataset(image["path"])
@@ -65,56 +64,29 @@ def propagate_ground_point(point,
             x,y, dist, metrics, corrmap = geom_match_simple(base_image, dest_image, sx, sy, 25, 25, \
                     match_func = match_func, \
                     match_kwargs=match_kwargs, \
+                    preprocess=preprocess,
                     verbose=verbose)
         except Exception as e:
-            raise Exception(e)
-            match_results.append(e)
+            print(e)
             continue
-
-        # Element 0 of each list is an id. We don't need this id because of how this was refactored to work
-        # in parallel. If we set to none though, the check below fails because one of the elements is None...
-        match_results.append(['foo',
-                              x,
-                              y,
-                              metrics,
-                              dist,
-                              corrmap,
-                              path,
-                              image["path"],
-                              image['id'],
-                              image['serial']])
-    print('Found the following matches: ', len(match_results))
-    print("MATCH RESULTS: ", match_results)
-    # get best offsets
-    match_results = np.asarray([res for res in match_results if isinstance(res, list) and all(r is not None for r in res)])
-    if match_results.shape[0] == 0:
-        print('No matches found')
-        # no matches
-        return new_measures
-
-    # column index 3 is the metric returned by the geom matcher
-    best_results = np.asarray([match for match in match_results if cost(match_results[:,3], match[3])])
-    print('BEST: ', best_results)
-    if best_results.shape[0] == 0:
-        # no matches satisfying cost
-        print("No best results.")
-        return new_measures
-
-    if verbose:
-        print("match_results final length: ", len(match_results))
-        print("best_results length: ", len(best_results))
-        print("Full results: ", best_results)
-        print("Winning CORRs: ", best_results[:,3], "Base Pixel shifts: ", best_results[:,4])
-        print("Base Images: ", best_results[:,6], "CTX images:", best_results[:,7])
-        print("Base Sample: ", sx, "CTX Samples: ", best_results[:,1])
-        print("Base Line: ", sy, "CTX Lines: ", best_results[:,2])
-        print('\n')
-
-    # if the single best results metric (returned by geom_matcher) is None
-    if len(best_results[:,3])==1 and best_results[:,3][0] is None:
-        print('Returning without a result as best is None.')
-        return new_measure
-
+
+        if x is None:
+            print(f'Match returned None. Unable to match the ground point into image {image["path"]}.')
+            continue
+
+        current_cost = cost(dist, metrics)
+        if current_cost > best_correlation and current_cost >= threshold:
+            print(f'Found a match with correlation: {metrics}, shift distance: {dist}, and cost: {current_cost}')
+            best_correlation = current_cost
+            best_match = [x, y, metrics, dist, corrmap, path, image['path'], image['id'], image['serial']]
+        else:
+            print('Found a match, but its cost is either below the threshold or worse than the current best.')
+            print(f'Rejected match correlation: {metrics}, shift distance: {dist}, cost: {current_cost}')
+
+    if best_match is None:
+        print('Unable to propagate this ground point into any images.')
+        return
+
     dem = ncg.dem
     config = ncg.config
 
@@ -132,11 +104,12 @@ def propagate_ground_point(point,
                          semi_major, semi_minor,
                          'latlon', 'geocent')
 
-    row = best_results[0]
-    sample  = row[1]
-    line = row[2]
+    row = best_match
+    sample  = float(row[0])
+    line = float(row[1])
 
     # Create the point
+    # Question - do we need to get the z from the ground source?
     point_geom = Point(x,y,z)
     cam_type = 'isis'
     point = Points(apriori=point_geom,
@@ -147,14 +120,14 @@ def propagate_ground_point(point,
 
     # Add the measure that was the best match.
     # Set the line/sample and aprioriline/apriorisample to be identical.
-    point.measures.append(Measures(sample=row[1],
-                                   line=row[2],
-                                   apriorisample=row[1],
-                                   aprioriline=row[2],
-                                   imageid=row[8],
-                                   serial=row[9],
+    point.measures.append(Measures(sample=sample,
+                                   line=line,
+                                   apriorisample=sample,
+                                   aprioriline=line,
+                                   imageid=row[7],
+                                   serial=row[8],
                                    measuretype=2,
-                                   weight=row[3],  # metric
+                                   weight=float(row[2]),  # metric
                                    choosername='propagate_ground_point'))
 
     for _, image in images.iterrows():
@@ -165,18 +138,18 @@ def propagate_ground_point(point,
         if cam_type == "isis":
             try:
                 sample, line = isis.ground_to_image(image["path"], lon_oc, lat_oc)
-            except ProcessError as e:
+            except ValueError as e:
                 if 'Requested position does not project in camera model' in e.stderr:
                     print(f'interesting point ({lon_oc},{lat_oc}) does not project to image {images["image_path"]}')
                     continue
 
-        if image['id'] == best_results[0][8]:
+        if image['id'] == best_match[7]:
             continue  # The measures was already added above, simply update the apriori line/sample
 
-        point.measures.append(Measures(sample=sample,
-                                        line=line,
-                                        apriorisample=sample,
-                                        aprioriline=line,
+        point.measures.append(Measures(sample=float(sample),
+                                        line=float(line),
+                                        apriorisample=float(sample),
+                                        aprioriline=float(line),
                                         imageid=image['id'],
                                         serial=image['serial'],
                                         measuretype=3,
@@ -231,7 +204,7 @@ def find_most_interesting_ground(apriori_lon_lat,
     image = roi.Roi(ground_mosaic, sample, line, size_x=size, size_y=size)
     image_roi = image.clip(dtype=base_dtype)
 
-    interesting = extract_most_interesting(bytescale(image_roi),  extractor_parameters={'nfeatures':30})
+    interesting = extract_most_interesting(image_roi,  extractor_parameters={'nfeatures':30})
 
     if interesting is None:
         warnings.warn('No interesting feature found. This is likely caused by either large contiguous no data areas in the base or a mismatch in the base_dtype.')
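With the refactor above, `cost` is invoked per candidate image as `cost(dist, metrics)` and the highest-cost match at or above `threshold` wins. A sketch of a hypothetical cost callable that rewards correlation and penalizes large shifts; `point` and `ncg` are assumed to exist in the calling context:

```python
from autocnet.matcher.ground import propagate_ground_point

def shift_penalized_cost(dist, corr):
    # Hypothetical cost: favor high correlation, penalize large pixel shifts.
    return corr - 0.05 * dist

new_point = propagate_ground_point(point,
                                   cost=shift_penalized_cost,  # called as cost(dist, metrics)
                                   threshold=0.6,
                                   ncg=ncg)
```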
diff --git a/autocnet/matcher/naive_template.py b/autocnet/matcher/naive_template.py
index 6a5f40ad48c8ea9f2ff9d16e41f18472c42747a6..36a26ff951a02ba79702bffcae39ae6c0d3c63f4 100644
--- a/autocnet/matcher/naive_template.py
+++ b/autocnet/matcher/naive_template.py
@@ -2,9 +2,10 @@ from math import floor
 import cv2
 import numpy as np
 from scipy.ndimage.interpolation import zoom
+from scipy.ndimage.measurements import center_of_mass
 
 
-def pattern_match_autoreg(template, image, subpixel_size=3, max_scaler=0.2, metric=cv2.TM_CCORR_NORMED):
+def pattern_match_autoreg(template, image, subpixel_size=3, metric=cv2.TM_CCOEFF_NORMED):
     """
     Call an arbitrary pattern matcher using a subpixel approach where a center of gravity using
     the correlation coefficients are used for subpixel alignment.
@@ -36,47 +37,50 @@ def pattern_match_autoreg(template, image, subpixel_size=3, max_scaler=0.2, metr
                The strength of the correlation in the range [-1, 1].
     """
 
+    # Apply the pixel scale template matcher
     result = cv2.matchTemplate(image, template, method=metric)
 
+    # Find the 'best' correlation
     if metric == cv2.TM_SQDIFF or metric == cv2.TM_SQDIFF_NORMED:
         y, x = np.unravel_index(np.argmin(result, axis=None), result.shape)
     else:
         y, x = np.unravel_index(np.argmax(result, axis=None), result.shape)
     max_corr = result[(y,x)]
-
+
+    # Clip the area around the best correlation; this is the correlation surface
     upper = int(2 + floor(subpixel_size / 2))
     lower = upper - 1
     # x, y are the location of the upper left hand corner of the template in the image
     area = result[y-lower:y+upper,
                   x-lower:x+upper]
 
+    # If the area is not the expected square size, the peak is too close to the edge to compute a subpixel shift
     if area.shape != (subpixel_size+2, subpixel_size+2):
         print("Max correlation is too close to the boundary.")
         return None, None, 0, None
 
-    # Find the max on the edges, scale just like autoreg (but why?)
-    edge_max = np.max(np.vstack([area[0], area[-1], area[:,0], area[:,-1]]))
-    internal = area[1:-1, 1:-1]
-    mask = (internal > edge_max + max_scaler * (edge_max-max_corr)).flatten()
-
-    empty = np.column_stack([np.repeat(np.arange(0,subpixel_size),subpixel_size),
-                             np.tile(np.arange(0,subpixel_size),subpixel_size),
-                             np.zeros(subpixel_size*subpixel_size)])
-    empty[:,-1] = internal.ravel()
-
-    to_weight = empty[mask, :]
-    # Average is the shift from y, x form
-    average = np.average(to_weight[:,:2], axis=0, weights=to_weight[:,2])
-
-    # The center of the 3x3 window is 1.5,1.5, so the shift needs to be recentered to 0,0
-    y += (subpixel_size/2 - average[0])
-    x += (subpixel_size/2 - average[1])
-
+    cmass = center_of_mass(area)
+    subpixel_y_shift = subpixel_size - 1 - cmass[0]
+    subpixel_x_shift = subpixel_size - 1 - cmass[1]
+
+    # Apply the subpixel shift to the whole-pixel shifts computed above
+    y += subpixel_y_shift
+    x += subpixel_x_shift
+
     # Compute the idealized shift (image center)
-    y -= (image.shape[0] / 2) - (template.shape[0] / 2)
-    x -= (image.shape[1] / 2) - (template.shape[1] / 2)
-
-    return float(x), float(y), float(max_corr), result
+    ideal_y = image.shape[0] / 2
+    ideal_x = image.shape[1] / 2
+
+    # Compute the shift from the template upper left to the template center
+    y += (template.shape[0] / 2)
+    x += (template.shape[1] / 2)
+
+    print('Results after shifting from the UL corner to the center of the template: ', y, x)
+
+    x -= ideal_x
+    y -= ideal_y
+
+    return x, y, max_corr, result
 
 def pattern_match(template, image, upsampling=16, metric=cv2.TM_CCOEFF_NORMED, error_check=False):
     """
diff --git a/autocnet/matcher/subpixel.py b/autocnet/matcher/subpixel.py
index 7e4baae9a6e2ef558e1158124de3d42c9968cd8e..1a362d1375e08eff7db2d7806bc4d1c4f7c0dcef 100644
--- a/autocnet/matcher/subpixel.py
+++ b/autocnet/matcher/subpixel.py
@@ -55,7 +55,6 @@ def check_geom_func(func):
 
     raise Exception(f"{func} not a valid geometry function.")
 
-
 def check_match_func(func):
     match_funcs = {
         "classic": subpixel_template_classic,
@@ -72,7 +71,6 @@ def check_match_func(func):
 
     raise Exception(f"{func} not a valid matching function.")
 
-
 # TODO: look into KeyPoint.size and perhaps use to determine an appropriately-sized search/template.
 def _prep_subpixel(nmatches, nstrengths=2):
     """
@@ -141,7 +139,6 @@ def check_image_size(imagesize):
     y = floor(y/2)
     return x,y
 
-
 def clip_roi(img, center_x, center_y, size_x=200, size_y=200, dtype="uint64"):
     """
     Given an input image, clip a square region of interest
@@ -199,7 +196,6 @@ def clip_roi(img, center_x, center_y, size_x=200, size_y=200, dtype="uint64"):
             return None, 0, 0
     return subarray, axr, ayr
 
-
 def subpixel_phase(sx, sy, dx, dy,
                    s_img, d_img,
                    image_size=(51, 51),
@@ -273,7 +269,6 @@ def subpixel_phase(sx, sy, dx, dy,
 
     return dx, dy, error, None
 
-
 def subpixel_transformed_template(sx, sy, dx, dy,
                                   s_img, d_img,
                                   transform,
@@ -446,7 +441,6 @@ def subpixel_transformed_template(sx, sy, dx, dy,
 
     return dx, dy, metrics, corrmap
 
-
 def subpixel_template_classic(sx, sy, dx, dy,
                               s_img, d_img,
                               image_size=(251, 251),
@@ -495,16 +489,24 @@ def subpixel_template_classic(sx, sy, dx, dy,
 
     image_size = check_image_size(image_size)
     template_size = check_image_size(template_size)
-    print('source image avg: ', s_img.mean())
-    print('dest image avg: ', d_img.mean())
+
+    # In ISIS, the source image is the search and the destination image is the
+    # pattern. In ISIS the search is CTX and the pattern is THEMIS, so the data
+    # roles are swapped between autocnet and ISIS.
+
+    print('source image avg: ', s_img.mean())  # THEMIS
+    print('dest image avg: ', d_img.mean())  # CTX
 
     s_roi = roi.Roi(s_img, sx, sy, size_x=image_size[0], size_y=image_size[1])
     d_roi = roi.Roi(d_img, dx, dy, size_x=template_size[0], size_y=template_size[1])
 
+    print('Source: ', sx, sy, d_roi.x, d_roi.y)
+    print('Destination: ', dx, dy, s_roi.x, s_roi.y)
+
     print('d shape', d_roi.clip().shape)
     print('d mean: ', d_roi.clip().mean())
     print(f'd mm: {d_roi.clip().min()} {d_roi.clip().max()}')
-    print(f'{len(isis.get_isis_special_pixels(d_roi.clip()))} chip sps : ', isis.get_isis_special_pixels(d_roi.clip()))
+    #print(f'{len(isis.get_isis_special_pixels(d_roi.clip()))} chip sps : ', isis.get_isis_special_pixels(d_roi.clip()))
 
     s_image = s_roi.clip()
     d_template = d_roi.clip()
@@ -512,22 +514,22 @@ def subpixel_template_classic(sx, sy, dx, dy,
     print('s shape', s_image.shape)
     print('s mean: ', s_image.mean())
     print(f's mm: {s_image.min()} {s_image.max()}')
-    print(f'{len(isis.get_isis_special_pixels(s_image))} chip sps: ', isis.get_isis_special_pixels(s_image))
+    #print(f'{len(isis.get_isis_special_pixels(s_image))} chip sps: ', isis.get_isis_special_pixels(s_image))
 
     if d_roi.variance == 0:
+        warnings.warn('Input ROI has no variance.')
         return [None] * 4
 
     if (s_image is None) or (d_template is None):
         return None, None, None, None
 
-    shift_x, shift_y, metrics, corrmap = func(d_template, s_image, **kwargs)
+    shift_x, shift_y, metrics, corrmap = func(d_template.astype('float32'), s_image.astype('float32'), **kwargs)
 
+    # Apply the shift and return
     dx = d_roi.x - shift_x
     dy = d_roi.y - shift_y
-
     return dx, dy, metrics, corrmap
 
-
 def subpixel_template(sx, sy, dx, dy,
                       s_img, d_img,
                       image_size=(251, 251),
@@ -645,7 +647,6 @@ def subpixel_template(sx, sy, dx, dy,
 
     return dx, dy, metrics, corrmap
 
-
 def subpixel_ciratefi(sx, sy, dx, dy, s_img, d_img, search_size=251, template_size=51, **kwargs):
     """
     Uses a pattern-matcher on subsets of two images determined from the passed-in keypoints and optional sizes to
@@ -697,7 +698,6 @@ def subpixel_ciratefi(sx, sy, dx, dy, s_img, d_img, search_size=251, template_si
     dy += (y_offset + t_roi.ayr)
     return dx, dy, strength
 
-
 def iterative_phase(sx, sy, dx, dy, s_img, d_img, size=(51, 51), reduction=11, convergence_threshold=1.0, max_dist=50, **kwargs):
     """
     Iteratively apply a subpixel phase matcher to source (s_img) and destination (d_img)
@@ -769,7 +769,6 @@ def iterative_phase(sx, sy, dx, dy, s_img, d_img, size=(51, 51), reduction=11, c
 
     return dx, dy, metrics
 
-
 def estimate_affine_transformation(destination_coordinates, source_coordinates):
     """
     Given a set of destination control points compute the affine transformation
@@ -793,7 +792,6 @@ def estimate_affine_transformation(destination_coordinates, source_coordinates):
 
     return tf.estimate_transform('affine', destination_coordinates, source_coordinates)
 
-
 def geom_match_simple(base_cube,
                        input_cube,
                        bcenter_x,
@@ -802,8 +800,8 @@ def geom_match_simple(base_cube,
                        size_y=60,
                        match_func="classic",
                        match_kwargs={"image_size":(101,101), "template_size":(31,31)},
-                       phase_kwargs=None,
-                       verbose=True):
+                       preprocess=None,
+                       verbose=False):
     """
     Propagates a source measure into destination images and then performs subpixel registration.
     Measure creation is done by projecting the (lon, lat) associated with the source measure into the
@@ -891,7 +889,8 @@ def geom_match_simple(base_cube,
     base_corners = [(base_startx,base_starty),
                     (base_startx,base_stopy),
                     (base_stopx,base_stopy),
-                    (base_stopx,base_starty)]
+                    (base_stopx,base_starty),
+                    (bcenter_x, bcenter_y)]
 
     dst_corners = []
     for x,y in base_corners:
@@ -900,20 +899,15 @@ def geom_match_simple(base_cube,
             dst_corners.append(
                 spatial.isis.ground_to_image(input_cube.file_name, lon, lat)
             )
-        except CalledProcessError as e:
-            if 'Requested position does not project in camera model' in e.stderr:
-                print(f'Skip geom_match; Region of interest corner located at ({lon}, {lat}) does not project to image {input_cube.base_name}')
-                return None, None, None, None, None
+        except Exception:
+            # Corners that fail to project are skipped; three projected
+            # corners are enough to estimate the affine transformation below.
+            pass
+
+    if len(dst_corners) < 3:
+        raise ValueError('Unable to find enough points to compute an affine transformation.')
 
     base_gcps = np.array([*base_corners])
 
     dst_gcps = np.array([*dst_corners])
 
-    start_x = dst_gcps[:,0].min()
-    start_y = dst_gcps[:,1].min()
-    stop_x = dst_gcps[:,0].max()
-    stop_y = dst_gcps[:,1].max()
-
     affine = tf.estimate_transform('affine', np.array([*base_gcps]), np.array([*dst_gcps]))
     t2 = time.time()
     print(f'Estimation of the transformation took {t2-t1} seconds.')
@@ -942,9 +936,12 @@ def geom_match_simple(base_cube,
     # These parameters seem to work best, should pass as kwargs later
     print('dst_arr mean: ', dst_arr.mean())
     print(f'dst_arr mm: {dst_arr.min()} {dst_arr.max()}')
-    print(f'special pixels: ', isis.get_isis_special_pixels(dst_arr))
+    #print(f'special pixels: ', isis.get_isis_special_pixels(dst_arr))
     
-    restemplate = match_func(bcenter_x, bcenter_y, bcenter_x, bcenter_y, bytescale(base_arr, cmin=0), bytescale(dst_arr, cmin=0), **match_kwargs)
+    if preprocess:
+        base_arr, dst_arr = preprocess(base_arr, dst_arr)
+
+    restemplate = match_func(bcenter_x, bcenter_y, bcenter_x, bcenter_y, base_arr, dst_arr, **match_kwargs)
     t4 = time.time()
     print(f'Matching took {t4-t3} seconds')
 
@@ -1445,6 +1442,7 @@ def subpixel_register_point(pointid,
                             match_kwargs={},
                             use_cache=False,
                             verbose=False,
+                            chooser='subpixel_register_point',
                             **kwargs):
 
     """
@@ -1542,7 +1540,7 @@ def subpixel_register_point(pointid,
 
         destination_node = nodes[measure.imageid]
 
-        print('geom_match image:', res.path)
+        print('geom_match image:', destination_node['image_path'])
         print('geom_func', geom_func)
         try:
            # new geom_match has an incompatible API; until we decide on one, put it behind an if.
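Because `geom_match_simple` no longer bytescales its arrays internally, callers wanting the old behavior can supply it via the new `preprocess` hook. A sketch, assuming `bytescale` lives in `autocnet.utils.utils` and that `base_cube`, `input_cube`, and the center coordinates exist in the calling context:

```python
from autocnet.matcher.subpixel import geom_match_simple
from autocnet.utils.utils import bytescale  # assumed location of the helper

def bytescale_both(base_arr, dst_arr):
    # Reproduce the previously hard-coded preprocessing: rescale both arrays to 8-bit.
    return bytescale(base_arr, cmin=0), bytescale(dst_arr, cmin=0)

x, y, dist, metrics, corrmap = geom_match_simple(base_cube,
                                                 input_cube,
                                                 bcenter_x,
                                                 bcenter_y,
                                                 preprocess=bytescale_both)
```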
diff --git a/autocnet/matcher/tests/test_naive_template.py b/autocnet/matcher/tests/test_naive_template.py
index 3c7b9f213bacf12a4f0db87158935e25b57dcae9..e2480eca23ded4ec67b8a26c2726a9e72aa9a074 100644
--- a/autocnet/matcher/tests/test_naive_template.py
+++ b/autocnet/matcher/tests/test_naive_template.py
@@ -3,23 +3,24 @@ import pytest
 import unittest
 from .. import naive_template
 import numpy as np
+import cv2
 
 class TestNaiveTemplateAutoReg(unittest.TestCase):
 
     def setUp(self):
         self._test_image = np.array(((0, 0, 0, 0, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 0, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 0, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 0, 1, 1, 0, 0, 0),
-                       (0, 0, 0, 0, 0, 1, 0, 0, 0),
-                       (0, 0, 0, 1, 1, 1, 0, 0, 0),
-                       (0, 0, 0, 1, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 1, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 0, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 0, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 0, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 0, 0, 0, 0, 0, 0),
-                       (0, 0, 0, 0, 0, 0, 0, 0, 0)), dtype=np.uint8)
+                                     (0, 0, 0, 0, 0, 0, 0, 0, 0),
+                                     (0, 0, 0, 1, 1, 1, 0, 0, 0),
+                                     (0, 0, 0, 0, 0, 1, 0, 0, 0),
+                                     (0, 0, 0, 0, 0, 1, 0, 0, 0),
+                                     (0, 0, 0, 1, 1, 1, 0, 0, 0),
+                                     (0, 0, 0, 1, 0, 1, 0, 0, 0),
+                                     (0, 0, 0, 1, 0, 0, 0, 0, 0),
+                                     (0, 0, 0, 0, 0, 0, 0, 0, 0),
+                                     (0, 0, 0, 0, 0, 0, 0, 0, 0),
+                                     (0, 0, 0, 0, 0, 0, 0, 0, 0),
+                                     (0, 0, 0, 0, 0, 0, 0, 0, 0),
+                                     (0, 0, 0, 0, 0, 0, 0, 0, 0)), dtype=np.uint8)
 
         self._shape = np.array(((1, 1, 1),
                                 (1, 0, 1),
@@ -28,10 +29,11 @@ class TestNaiveTemplateAutoReg(unittest.TestCase):
 
     def test_subpixel_shift(self):
         result_x, result_y, result_strength, _ = naive_template.pattern_match_autoreg(self._shape,
-                                                                                   self._test_image)
-        self.assertEqual(result_x, 0.5)
-        self.assertEqual(result_y, -1.5)
-        self.assertGreaterEqual(result_strength, 0.8)
+                                                                                      self._test_image,
+                                                                                      metric=cv2.TM_CCORR_NORMED)
+        print(result_x, result_y)
+        np.testing.assert_almost_equal(result_x, 0.167124, decimal=5)
+        np.testing.assert_almost_equal(result_y, -1.170976, decimal=5)
 
 class TestNaiveTemplate(unittest.TestCase):
 
diff --git a/autocnet/spatial/isis.py b/autocnet/spatial/isis.py
index d7962bc9d031134981fb2894a934f7bc81b46574..528f29e3b4014db422b14cf9b771dd05308547bf 100644
--- a/autocnet/spatial/isis.py
+++ b/autocnet/spatial/isis.py
@@ -102,7 +102,6 @@ def get_nodata_bounds(arr):
     # subtract 1 to exclude the special pixel
     x_dist = abs(cx - nearx) - 1
     y_dist = abs(cy - neary) - 1
-    print(x_dist, y_dist)
 
     # left_x, right_x, top_y, bottom_y
     left_x = cx - x_dist
@@ -196,12 +195,6 @@ def point_info(
             f"numbers, they were: {x} and {y}"
         )
 
-    if point_type == "image":
-        # convert to ISIS pixels
-        for i in range(len(x_coords)):
-            x_coords[i] += 0.5
-            y_coords[i] += 0.5
-
     results = []
     if pvl.load(cube_path).get("IsisCube").get("Mapping"):
         # We have a projected image, and must use mappt
@@ -215,16 +208,20 @@ def point_info(
                     coordsys="UNIVERSAL"
                 ),
                 "image": dict(
-                    sample=xx,
-                    line=yy,
+                    # Convert PLIO pixels to ISIS pixels
+                    sample=xx+0.5,
+                    line=yy+0.5
                 )
             }
             for k in mappt_args.keys():
                 mappt_args[k].update(mappt_common_args)
+            mapres = pvl.loads(isis.mappt(cube_path, **mappt_args[point_type]).stdout)["Results"]
+
+            # Convert from ISIS pixels to PLIO pixels
+            mapres['Sample'] = mapres['Sample'] - 0.5
+            mapres['Line'] = mapres['Line'] - 0.5
 
-            results.append(pvl.loads(
-                isis.mappt(cube_path, **mappt_args[point_type]).stdout
-            )["Results"])
+            results.append(mapres)
     else:
         # Not projected, use campt
         if point_type == "ground":
@@ -233,7 +230,7 @@ def point_info(
             p_list = [f"{lat}, {lon}" for lon, lat in zip(x_coords, y_coords)]
         else:
             p_list = [
-                f"{samp}, {line}" for samp, line in zip(x_coords, y_coords)
+                f"{samp+0.5}, {line+0.5}" for samp, line in zip(x_coords, y_coords)
             ]
 
         # ISIS's campt needs points in a file
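The 0.5 pixel shifts above capture the convention difference: plio/autocnet pixel coordinates are offset by half a pixel from ISIS pixel coordinates, so values are shifted on the way into `mappt`/`campt` and shifted back on the way out. Written as standalone helpers, the conversion is simply:

```python
def plio_to_isis(sample, line):
    # autocnet/plio pixel coordinates -> ISIS pixel coordinates
    return sample + 0.5, line + 0.5

def isis_to_plio(sample, line):
    # ISIS pixel coordinates -> autocnet/plio pixel coordinates
    return sample - 0.5, line - 0.5
```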
diff --git a/autocnet/utils/serializers.py b/autocnet/utils/serializers.py
index d9c4256b9cfa529e873ab5470c26aefd8b90da88..c2e1b756e4ef8dc9523b7359558862a8d2ad5ecb 100644
--- a/autocnet/utils/serializers.py
+++ b/autocnet/utils/serializers.py
@@ -18,8 +18,8 @@ class JsonEncoder(json.JSONEncoder):
         if isinstance(obj, bytes):
             return obj.decode('utf-8')
         if isinstance(obj, set):
-            return list(obj)
-        if isinstance(obj,  shapely.geometry.base.BaseGeometry):
+            return sorted(obj)
+        if isinstance(obj, shapely.geometry.base.BaseGeometry):
             return obj.wkt
         if callable(obj):
             return encodebytes(dill.dumps(obj)).decode()
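Sorting sets makes `JsonEncoder` output deterministic, so two serializations of the same payload can be compared byte-for-byte; a quick illustration:

```python
import json

from autocnet.utils.serializers import JsonEncoder

# Set iteration order is arbitrary, but the encoder now sorts sets,
# so repeated serializations are identical.
payload = {'image_ids': {3, 1, 2}}
print(json.dumps(payload, cls=JsonEncoder))  # {"image_ids": [1, 2, 3]}
```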
diff --git a/autocnet/utils/tests/test_serializers.py b/autocnet/utils/tests/test_serializers.py
index 0627ff05fad91d238d3141130b189f1ab782aeab..174b848c38d55caf331a6e99326ad52305783afe 100644
--- a/autocnet/utils/tests/test_serializers.py
+++ b/autocnet/utils/tests/test_serializers.py
@@ -22,6 +22,8 @@ def test_json_encoder(data, serialized):
     res = json.loads(res, object_hook=object_hook)
     if isinstance(res['foo'], list):
         res['foo'] = sorted(res['foo'])
+    elif isinstance(res['foo'], Point):
+        res['foo'] = res['foo'].wkt
     assert res == serialized
 
 @pytest.mark.parametrize("data", [
diff --git a/conda/meta.yaml b/conda/meta.yaml
index 39c6a3e4ecea1355e146ac37e2a042e07ae3b7b8..92422458b140fcfe731046b974a2d8842fa37d93 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -1,6 +1,6 @@
 package:
   name: autocnet
-  version: 0.4.1
+  version: 0.6.0
   
 channels:
   - conda-forge
diff --git a/conftest.py b/conftest.py
index 2a37a2501c3fec2092bff5105e22b8fc933118cc..cbaa53fa574e3ac53fc2fa838e6cf62d6f7dbbe0 100644
--- a/conftest.py
+++ b/conftest.py
@@ -1,6 +1,7 @@
 import os
 from unittest.mock import Mock, MagicMock, PropertyMock
 
+import fakeredis
 import networkx as nx
 import numpy as np
 import pandas as pd
@@ -14,6 +15,11 @@ from autocnet.io.db import model
 
 from plio.io.io_gdal import GeoDataset
 
+@pytest.fixture
+def queue():
+    return fakeredis.FakeStrictRedis()
+
+
 @pytest.fixture(scope='session')
 def candidategraph(node_a, node_b, node_c):
     # TODO: Getting this fixture from the global conf is causing deepcopy
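With the `queue` fixture promoted to the top-level conftest.py, any test in the suite can request a fake redis instance by argument name; a hypothetical example:

```python
def test_queue_round_trip(queue):
    # queue is the fakeredis.FakeStrictRedis instance provided by conftest.py
    queue.rpush('processing', '{"id": 1}')
    assert queue.lpop('processing') == b'{"id": 1}'
```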
diff --git a/setup.py b/setup.py
index c694ca0661a75c31bf90050e7049d87f98056add..06697bfe16e68761cc62fca19649d55ccfb96844 100755
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@ def setup_package():
 
     setup(
         name = "autocnet",
-        version = '0.5.0',
+        version = '0.6.0',
         author = "Jay Laura",
         author_email = "jlaura@usgs.gov",
         description = ("I/O API to support planetary data formats."),