Commit affbc873 authored by jay

Fixes failing tests

parents e0c3fc93 1787fe00
@@ -44,7 +44,7 @@ install:
   - conda config --add channels conda-forge
   - conda config --add channels jlaura
   - conda install -c conda-forge gdal h5py
-  - conda install pandas sqlalchemy pyyaml
+  - conda install pandas sqlalchemy pyyaml networkx
   - conda install -c jlaura pvl protobuf
   # Development installation
@@ -53,7 +53,7 @@ install:
  - cmd: conda config --add channels conda-forge
  - cmd: conda config --add channels jlaura
  - cmd: conda install --yes -c conda-forge gdal h5py
- - cmd: conda install --yes pandas sqlalchemy pyyaml
+ - cmd: conda install --yes pandas sqlalchemy pyyaml networkx
  - cmd: conda install --yes -c jlaura protobuf pvl
  # Development installation
from io import BytesIO
import json
import os
import warnings
from zipfile import ZipFile

from networkx.readwrite import json_graph
import numpy as np
import pandas as pd

try:
    import autocnet
    autocnet_avail = True
except ImportError:
    autocnet_avail = False
class NumpyEncoder(json.JSONEncoder):
    def default(self, obj):
        """If the input object is an ndarray it is converted into a dict
        holding the dtype, shape, and the data as a nested list.
        """
        if isinstance(obj, np.ndarray):
            return dict(__ndarray__=obj.tolist(),
                        dtype=str(obj.dtype),
                        shape=obj.shape)
        # Let the base class default method raise the TypeError
        return json.JSONEncoder.default(self, obj)
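
# Illustration (not part of the commit): NumpyEncoder serializes any
# ndarray it encounters into a plain JSON object, e.g. a (2, 3) float32
# array is encoded as (key order may vary):
#
#     {"__ndarray__": [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
#      "dtype": "float32",
#      "shape": [2, 3]}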

def save(network, projectname):
    """
    Save an AutoCNet candidate graph to disk in a compressed file. The
    graph adjacency structure is stored as human-readable JSON and all
    potentially large numpy arrays are stored as compressed binary. The
    project archive is a standard .zip file that can have any extension,
    e.g., <projectname>.project, <projectname>.zip, <projectname>.myname.

    TODO: This function writes an intermediary .npz to disk when saving. Can
    we write the .npz to memory?

    Parameters
    ----------
    network : object
              The AutoCNet CandidateGraph object

    projectname : str
                  The PATH to the output file.
    """
    # Convert the graph into json format
    js = json_graph.node_link_data(network)

    with ZipFile(projectname, 'w') as pzip:
        js_str = json.dumps(js, cls=NumpyEncoder, sort_keys=True, indent=4)
        pzip.writestr('graph.json', js_str)

        # Write the array node attributes to per-node compressed .npz files
        for n, data in network.nodes_iter(data=True):
            np.savez('{}.npz'.format(data['node_id']),
                     descriptors=data.descriptors,
                     _keypoints=data._keypoints,
                     _keypoints_idx=data._keypoints.index,
                     _keypoints_columns=data._keypoints.columns)
            pzip.write('{}.npz'.format(data['node_id']))
            os.remove('{}.npz'.format(data['node_id']))

        # Write the array edge attributes to per-edge compressed .npz files
        for s, d, data in network.edges_iter(data=True):
            # Normalize the edge key so that source < destination
            if s > d:
                s, d = d, s
            np.savez('{}_{}.npz'.format(s, d),
                     matches=data.matches,
                     matches_idx=data.matches.index,
                     matches_columns=data.matches.columns,
                     _masks=data._masks,
                     _masks_idx=data._masks.index,
                     _masks_columns=data._masks.columns)
            pzip.write('{}_{}.npz'.format(s, d))
            os.remove('{}_{}.npz'.format(s, d))
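
# Sketch for the TODO above (an assumption, not part of the commit):
# np.savez accepts any file-like object, so the intermediate .npz could be
# built in memory and handed to ZipFile.writestr, avoiding the
# write-then-remove round trip on disk.
def _savez_to_bytes(**arrays):
    """Pack arrays into an in-memory .npz archive and return its raw bytes."""
    buf = BytesIO()
    np.savez(buf, **arrays)
    return buf.getvalue()
# e.g., inside save():
#     pzip.writestr('{}.npz'.format(data['node_id']),
#                   _savez_to_bytes(descriptors=data.descriptors, ...))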

def json_numpy_obj_hook(dct):
    """Decodes a previously encoded numpy ndarray with proper shape and dtype.

    :param dct: (dict) json encoded ndarray
    :return: (ndarray) if input was an encoded ndarray
    """
    if isinstance(dct, dict) and '__ndarray__' in dct:
        # NumpyEncoder stores the data as a nested list, so rebuild the
        # array directly; np.frombuffer would misinterpret the values.
        return np.asarray(dct['__ndarray__'],
                          dtype=dct['dtype']).reshape(dct['shape'])
    return dct
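
# Round-trip sketch (illustration, not part of the commit): encode with
# NumpyEncoder, decode with json_numpy_obj_hook, and recover the array.
def _roundtrip_example():
    arr = np.arange(6, dtype='float32').reshape(2, 3)
    encoded = json.dumps(arr, cls=NumpyEncoder)
    decoded = json.loads(encoded, object_hook=json_numpy_obj_hook)
    assert decoded.dtype == arr.dtype and decoded.shape == arr.shape
    return np.array_equal(arr, decoded)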

def load(projectname):
    """
    Load an AutoCNet CandidateGraph that was written by save().
    """
    if autocnet_avail is False:
        warnings.warn('AutoCNet Library is not available. Unable to load an AutoCNet CandidateGraph')
        return

    with ZipFile(projectname, 'r') as pzip:
        # Read the graph object
        with pzip.open('graph.json', 'r') as g:
            data = json.loads(g.read().decode(), object_hook=json_numpy_obj_hook)

        cg = autocnet.graph.network.CandidateGraph()
        Edge = autocnet.graph.edge.Edge
        Node = autocnet.graph.node.Node

        # Reload the graph attributes
        cg.graph = data['graph']

        # Handle nodes
        for d in data['nodes']:
            n = Node(image_name=d['image_name'], image_path=d['image_path'], node_id=d['id'])
            n['hash'] = d['hash']
            # Load the byte stream for the nested npz file into memory and then unpack
            nzf = np.load(BytesIO(pzip.read('{}.npz'.format(d['id']))))
            n._keypoints = pd.DataFrame(nzf['_keypoints'],
                                        index=nzf['_keypoints_idx'],
                                        columns=nzf['_keypoints_columns'])
            n.descriptors = nzf['descriptors']
            cg.add_node(d['node_id'])
            cg.node[d['node_id']] = n

        # Handle edges
        for e in data['links']:
            cg.add_edge(e['source'], e['target'])
            edge = Edge()
            edge.source = cg.node[e['source']]
            edge.destination = cg.node[e['target']]
            edge['fundamental_matrix'] = e['fundamental_matrix']
            edge['weight'] = e['weight']
            nzf = np.load(BytesIO(pzip.read('{}_{}.npz'.format(e['source'], e['target']))))
            edge._masks = pd.DataFrame(nzf['_masks'],
                                       index=nzf['_masks_idx'],
                                       columns=nzf['_masks_columns'])
            edge.matches = pd.DataFrame(nzf['matches'],
                                        index=nzf['matches_idx'],
                                        columns=nzf['matches_columns'])
            # Install the reconstructed edge into the adjacency structure
            cg.edge[e['source']][e['target']] = edge

    return cg
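
# Usage sketch (illustration, names hypothetical): a populated autocnet
# CandidateGraph `cg` round-trips through a project archive like so:
#
#     save(cg, 'example.project')
#     cg2 = load('example.project')
#
# The archive is an ordinary zip, so its members can be inspected with the
# standard library alone:
def _list_project(projectname):
    """Print the members of a saved project archive (illustrative helper)."""
    with ZipFile(projectname, 'r') as pzip:
        for name in pzip.namelist():
            print(name)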

import os

import pandas as pd
import pvl
import numpy as np

from plio.utils.utils import find_in_dict
from plio.io.io_gdal import GeoDataset


class Spectral_Profiler(object):

@@ -52,6 +53,7 @@ class Spectral_Profiler(object):
         label = pvl.load(input_data)
         self.label = label
+        self.input_data = input_data
         with open(input_data, 'rb') as indata:
             # Extract and handle the ancillary data
             ancillary_data = find_in_dict(label, "ANCILLARY_AND_SUPPLEMENT_DATA")

@@ -128,3 +130,20 @@ class Spectral_Profiler(object):
            self.spectra[i] = self.spectra[i][self.spectra[i]['QA'] < qa_threshold]

        self.spectra = pd.Panel(self.spectra)
    def open_browse(self, extension='.jpg'):
        """
        Attempt to open the browse image corresponding to the spc file.

        Parameters
        ----------
        extension : str
                    The file type extension to be added to the base name
                    of the spc file.
        """
        path, ext = os.path.splitext(self.input_data)
        self.browse = GeoDataset(path + extension)
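
    # Usage sketch (illustration): for a label file foo.spc, the browse
    # product is expected beside it with the same base name, e.g.
    #
    #     ds = Spectral_Profiler('foo.spc')   # hypothetical file
    #     ds.open_browse('.jpg')              # opens foo.jpg as a GeoDataset
    #     arr = ds.browse.read_array()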

@@ -8,7 +8,7 @@ sys.path.insert(0, os.path.abspath('..'))

from plio.examples import get_path
from plio.io import io_spectral_profiler
from plio.io.io_gdal import GeoDataset

class Test_Spectral_Profiler_IO(unittest.TestCase):

@@ -21,6 +21,11 @@ class Test_Spectral_Profiler_IO(unittest.TestCase):
        self.assertIsInstance(ds.spectra, pd.Panel)
        self.assertEqual(ds.spectra[0].columns.tolist(), ['RAW', 'REF1', 'REF2', 'QA'])

    def test_read_browse(self):
        ds = io_spectral_profiler.Spectral_Profiler(self.examplefile)
        ds.open_browse()
        self.assertIsInstance(ds.browse, GeoDataset)
        self.assertEqual(ds.browse.read_array().shape, (512, 456))

if __name__ == '__main__':
    unittest.main()