diff --git a/knoten/bundle.py b/knoten/bundle.py
index ed64bdc82a3c1a1df7aa880ccffd956fb3c12d42..15c4ed9ab89151e006a45ab567910eb0ed1de900 100644
--- a/knoten/bundle.py
+++ b/knoten/bundle.py
@@ -3,6 +3,8 @@ import pandas as pd
 import pvl
 import os
 import csmapi
+import itertools
+from math import floor
 from pysis import isis
 from ale.drivers import loads
@@ -64,6 +66,9 @@ def closest_approach(points, direction):
     -------
     : array
         The (x, y, z) point that is closest to all of the lines
+    : ndarray
+        The (x, y, z) covariance matrix that describes the uncertainty of the
+        point
     """
     num_lines = points.shape[0]
     design_mat = np.zeros((num_lines * 3, 3))
@@ -73,8 +78,10 @@
         line = direction[i] / np.linalg.norm(direction[i])
         design_mat[3*i:3*i+3] = np.identity(3) - np.outer(line, line)
         rhs[3*i:3*i+3] = np.dot(point,line) * line + point
-    closest_point = np.linalg.lstsq(design_mat, rhs, rcond=None)[0]
-    return closest_point
+    N_inv = np.linalg.inv(design_mat.T.dot(design_mat))
+    closest_point = N_inv.dot(design_mat.T).dot(rhs)
+
+    return closest_point, N_inv
 
 def compute_apriori_ground_points(network, sensors):
     """
@@ -103,9 +110,14 @@
             locus = sensors[row["serialnumber"]].imageToRemoteImagingLocus(measure)
             positions.append([locus.point.x, locus.point.y, locus.point.z])
             look_vecs.append([locus.direction.x, locus.direction.y, locus.direction.z])
-        ground_pt = closest_approach(np.array(positions), np.array(look_vecs))
+        ground_pt, covar_mat = closest_approach(np.array(positions), np.array(look_vecs))
+        covar_vec = [covar_mat[0,0], covar_mat[0,1], covar_mat[0,2],
+                     covar_mat[1,1], covar_mat[1,2], covar_mat[2,2]]
         network.loc[network.id == point_id, ["aprioriX", "aprioriY", "aprioriZ"]] = ground_pt
         network.loc[network.id == point_id, ["adjustedX", "adjustedY", "adjustedZ"]] = ground_pt
+        # We have to do a separate loop to assign a list to a single cell
+        for measure_id, row in group.iterrows():
+            network.at[measure_id, 'aprioriCovar'] = covar_vec
     return network
 
 class CsmParameter:
@@ -199,55 +212,135 @@ def compute_ground_partials(sensor, ground_pt):
     partials = np.array(sensor.computeGroundPartials(csm_ground))
     return np.reshape(partials, (2, 3))
 
-def compute_jacobian(network, sensors, parameters):
+def compute_coefficient_columns(network, sensors, parameters):
     """
-    Compute the Jacobian matrix.
+    Compute the columns for different coefficients
 
     Parameters
     ----------
     network : DataFrame
-        The control network as a dataframe generated by plio. The ground
-        point columns will be in the same order as the control points are
-        in this.
+        The control network as a dataframe generated by plio.
     sensors : dict
         Dictionary that maps ISIS serial numbers to CSM sensor models
     parameters : dict
         Dictionary that maps serial numbers to lists of parameters to
-        solve for. The image parameter columns of the Jacobian will be
-        in the same order as this.
+        solve for.
 
     Returns
     -------
-    : ndarray
-        The Jacobian matrix
+    : OrderedDict
+        Dictionary that maps serial numbers and point IDs to the column range
+        their parameters occupy in the Jacobian matrix.
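+
+        For example, two images "a" and "b" that each solve for six
+        parameters, followed by one adjustable point "pt1", would map to
+        {"a": (0, 6), "b": (6, 12), "pt1": (12, 15)}. (The serial numbers
+        and point ID here are illustrative only.)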
""" num_columns = 0 coefficient_columns = OrderedDict() for serial in network["serialnumber"].unique(): coefficient_columns[serial] = num_columns num_columns += len(parameters[serial]) + coefficient_columns[serial] = (coefficient_columns[serial], num_columns) for point_id in network["id"].unique(): # Skip fixed points if network.loc[network.id == point_id].iloc[0]["pointType"] == 4: continue coefficient_columns[point_id] = num_columns num_columns += 3 + coefficient_columns[point_id] = (coefficient_columns[point_id], num_columns) + return coefficient_columns - num_rows = len(network) * 2 +def compute_jacobian(network, sensors, parameters, coefficient_columns): + """ + Compute the Jacobian matrix. + Parameters + ---------- + network : DataFrame + The control network as a dataframe generated by plio. + sensors : dict + Dictionary that maps ISIS serial numbers to CSM sensor models + parameters : dict + Dictionary that maps serial numbers to lists of parameters to + solve for. + coefficient_columns : OrderedDict + Dictionary that maps serial numbers and point IDs to + the column range their parameters are in the Jacobian + matrix. + + Returns + ------- + : ndarray + The Jacobian matrix + """ + num_columns = max([col_range[1] for col_range in coefficient_columns.values()]) + num_rows = len(network) * 2 jacobian = np.zeros((num_rows, num_columns)) + for i in range(len(network)): row = network.iloc[i] serial = row["serialnumber"] ground_pt = row[["adjustedX", "adjustedY", "adjustedZ"]] sensor = sensors[serial] params = parameters[serial] - image_column = coefficient_columns[serial] - point_column = coefficient_columns[row["id"]] - jacobian[2*i : 2*i+2, image_column : image_column+len(params)] = compute_sensor_partials(sensor, params, ground_pt) - jacobian[2*i : 2*i+2, point_column : point_column+3] = compute_ground_partials(sensor, ground_pt) + image_range = coefficient_columns[serial] + point_range = coefficient_columns[row["id"]] + jacobian[2*i : 2*i+2, image_range[0] : image_range[1]] = compute_sensor_partials(sensor, params, ground_pt) + jacobian[2*i : 2*i+2, point_range[0] : point_range[1]] = compute_ground_partials(sensor, ground_pt) + + return jacobian - return jacobian, coefficient_columns +def compute_parameter_weights(network, sensors, parameters, coefficient_columns): + """ + Compute the parameter weight matrix + + Parameters + ---------- + network : DataFrame + The control network as a dataframe generated by plio. + sensors : dict + Dictionary that maps ISIS serial numbers to CSM sensor models + parameters : dict + Dictionary that maps serial numbers to lists of parameters to + solve for. + coefficient_columns : OrderedDict + Dictionary that maps serial numbers and point IDs to + the column range their parameters are in the Jacobian + matrix. Their parameters weights will have the same + ordering in the weight matrix. 
+
+    Returns
+    -------
+    : ndarray
+        The parameter weight matrix
+    """
+    num_params = max([col_range[1] for col_range in coefficient_columns.values()])
+    weight_mat = np.zeros((num_params, num_params))
+
+    # Image parameters
+    for sn, params in parameters.items():
+        param_count = len(params)
+        covar_mat = np.zeros((param_count, param_count))
+        for a, b in itertools.product(range(param_count), range(param_count)):
+            covar_mat[a, b] = sensors[sn].getParameterCovariance(params[a].index, params[b].index)
+        col_range = coefficient_columns[sn]
+        weight_mat[col_range[0]:col_range[1], col_range[0]:col_range[1]] = np.linalg.inv(covar_mat)
+
+    # Point parameters
+    for point_id, group in network.groupby('id'):
+        # If there is no covariance matrix, then just continue on
+        point_covar = list(group.iloc[0]["aprioriCovar"])
+        if len(point_covar) != 6:
+            continue
+        # The covariance matrix is stored as just one triangle, so we have
+        # to unpack it.
+        covar_mat = np.array(
+            [[point_covar[0], point_covar[1], point_covar[2]],
+             [point_covar[1], point_covar[3], point_covar[4]],
+             [point_covar[2], point_covar[4], point_covar[5]]]
+        )
+        col_range = coefficient_columns[point_id]
+        weight_mat[col_range[0]:col_range[1], col_range[0]:col_range[1]] = np.linalg.inv(covar_mat)
+
+    return weight_mat
 
 def compute_residuals(network, sensors):
     """
diff --git a/tests/test_bundle.py b/tests/test_bundle.py
index 7f8c02cb9b2b83559b2fa9b86b6caee4203fb96f..b1265d38784ff8a46b3a9b8e2150061290586bd4 100644
--- a/tests/test_bundle.py
+++ b/tests/test_bundle.py
@@ -4,6 +4,7 @@ import pytest
 import numpy as np
 import pandas as pd
 from knoten import bundle
+from collections import OrderedDict
 from csmapi import csmapi
 
 @pytest.fixture
@@ -13,6 +14,7 @@ def control_network():
         'serialnumber': ['a', 'b', 'c', 'd', 'b', 'a', 'b', 'c', 'd'],
         'line': np.arange(9),
         'sample': np.arange(9)[::-1],
+        'aprioriCovar': [[], [], [], [], [], [], [], [], []],
         'aprioriX': np.zeros(9),
         'aprioriY': np.zeros(9),
@@ -37,43 +39,49 @@ def sensors():
 def test_closest_approach_intersect():
     points = np.array([[-1, 1, 2], [0, 2, 2], [0, 1, 3]])
     directions = np.array([[1, 0, 0], [0, 2, 0], [0, 0, -1]])
-    res = bundle.closest_approach(points, directions)
+    res, covar = bundle.closest_approach(points, directions)
 
     np.testing.assert_allclose(res, [0, 1, 2])
 
 def test_closest_approach_no_intersect():
     points = np.array([[-1, 1, 2], [0.5, 1-np.sqrt(3)/2.0, 2], [0.5, 1+np.sqrt(3)/2.0, 4]])
     directions = np.array([[0, 1, 0], [np.sqrt(3)/2.0, 0.5, 0], [0, 0, 1]])
-    res = bundle.closest_approach(points, directions)
+    res, covar = bundle.closest_approach(points, directions)
 
     np.testing.assert_allclose(res, [0, 1, 2], atol=1e-12)
 
 def test_compute_ground_points(control_network, sensors):
     expected_bob = np.array([1.0, 7.0, 0.0])
+    bob_covar = np.array([[1.0, 0.1, 0.2], [0.1, 1.5, 0.15], [0.2, 0.15, 3.0]])
     expected_sally = np.array([6.5, 1.5, 0.0])
+    sally_covar = np.array([[2.0, 1.1, 0.6], [1.1, 1.0, 0.45], [0.6, 0.45, 3.2]])
 
-    with mock.patch('knoten.bundle.closest_approach', side_effect=[expected_bob, expected_sally]) as mock_closest:
+    with mock.patch('knoten.bundle.closest_approach', side_effect=[(expected_bob, bob_covar), (expected_sally, sally_covar)]) as mock_closest:
         out_df = bundle.compute_apriori_ground_points(control_network, sensors)
 
     mock_closest.assert_called()
-    np.testing.assert_array_equal(
-        out_df[out_df.id == "bob"][["aprioriX", "aprioriY", "aprioriZ"]].values,
-        np.repeat(expected_bob[:, None], 3, axis=1).T)
-    np.testing.assert_array_equal(
-        out_df[out_df.id == "bob"][["adjustedX", "adjustedY", "adjustedZ"]].values,
-        np.repeat(expected_bob[:, None], 3, axis=1).T)
-    np.testing.assert_array_equal(
-        out_df[out_df.id == "tim"][["aprioriX", "aprioriY", "aprioriZ"]].values,
-        np.zeros((2, 3)))
-    np.testing.assert_array_equal(
-        out_df[out_df.id == "tim"][["adjustedX", "adjustedY", "adjustedZ"]].values,
-        np.zeros((2, 3)))
-    np.testing.assert_array_equal(
-        out_df[out_df.id == "sally"][["aprioriX", "aprioriY", "aprioriZ"]].values,
-        np.repeat(expected_sally[:, None], 4, axis=1).T)
-    np.testing.assert_array_equal(
-        out_df[out_df.id == "sally"][["adjustedX", "adjustedY", "adjustedZ"]].values,
-        np.repeat(expected_sally[:, None], 4, axis=1).T)
+    for _, row in out_df[out_df.id == "bob"].iterrows():
+        np.testing.assert_array_equal(row[["aprioriX", "aprioriY", "aprioriZ"]].values,
+                                      expected_bob)
+        np.testing.assert_array_equal(row[["adjustedX", "adjustedY", "adjustedZ"]].values,
+                                      expected_bob)
+        np.testing.assert_array_equal(list(row["aprioriCovar"]),
+                                      [bob_covar[0,0], bob_covar[0,1], bob_covar[0,2],
+                                       bob_covar[1,1], bob_covar[1,2], bob_covar[2,2]])
+    for _, row in out_df[out_df.id == "tim"].iterrows():
+        np.testing.assert_array_equal(row[["aprioriX", "aprioriY", "aprioriZ"]].values,
+                                      np.zeros(3))
+        np.testing.assert_array_equal(row[["adjustedX", "adjustedY", "adjustedZ"]].values,
+                                      np.zeros(3))
+        assert not list(row["aprioriCovar"])
+    for _, row in out_df[out_df.id == "sally"].iterrows():
+        np.testing.assert_array_equal(row[["aprioriX", "aprioriY", "aprioriZ"]].values,
+                                      expected_sally)
+        np.testing.assert_array_equal(row[["adjustedX", "adjustedY", "adjustedZ"]].values,
+                                      expected_sally)
+        np.testing.assert_array_equal(list(row["aprioriCovar"]),
+                                      [sally_covar[0,0], sally_covar[0,1], sally_covar[0,2],
+                                       sally_covar[1,1], sally_covar[1,2], sally_covar[2,2]])
 
 def test_get_sensor_parameter():
     mock_sensor = mock.MagicMock(spec=csmapi.RasterGM)
@@ -117,9 +125,17 @@ def test_compute_jacobian(control_network, sensors):
     parameters = {sn: [mock.MagicMock()]*2 for sn in sensors}
     sensor_partials = [(i+1) * np.ones((2, 2)) for i in range(9)]
     ground_partials = [-(i+1) * np.ones((2, 3)) for i in range(9)]
+    coefficient_columns = OrderedDict()
+    coefficient_columns['a'] = (0, 2)
+    coefficient_columns['b'] = (2, 4)
+    coefficient_columns['c'] = (4, 6)
+    coefficient_columns['d'] = (6, 8)
+    coefficient_columns['bob'] = (8, 11)
+    coefficient_columns['tim'] = (11, 14)
+    coefficient_columns['sally'] = (14, 17)
     with mock.patch('knoten.bundle.compute_sensor_partials', side_effect=sensor_partials) as sensor_par_mock, \
          mock.patch('knoten.bundle.compute_ground_partials', side_effect=ground_partials) as ground_par_mock:
-        J, coefficient_columns = bundle.compute_jacobian(control_network, sensors, parameters)
+        J = bundle.compute_jacobian(control_network, sensors, parameters, coefficient_columns)
 
     expected_J = [
         [1, 1, 0, 0, 0, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0, 0, 0],