Commit f6e1f239 authored by Jesse Mapel's avatar Jesse Mapel

Minor updates

parent 86f9fdc4
%% Cell type:code id: tags:
``` python
import os
from plio.io import io_controlnetwork
from knoten.csm import create_csm
from scipy import sparse
import scipy.sparse.linalg  # makes sparse.linalg.spsolve available for the reduced solve below
import ale
import csmapi
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt
from knoten.bundle import *
```
%% Output
/Users/jmapel/miniconda3/envs/knoten/lib/python3.7/site-packages/ale/__init__.py:22: UserWarning: ALESPICEROOT environment variable not set, Spice Drivers will not function correctly
warnings.warn('ALESPICEROOT environment variable not set, Spice Drivers will not function correctly')
%% Cell type:code id: tags:
``` python
cubes = 'data/cubes.lis'
sensors = generate_sensors(cubes)
network = 'data/hand_dense.net'
cnet = io_controlnetwork.from_isis(network)
sensors = {sn: sensors[sn] for sn in cnet["serialnumber"].unique()}
cnet = compute_apriori_ground_points(cnet, sensors) # autoseed did not generate ground points, so compute them and repopulate the data frame
```
%% Cell type:code id: tags:
``` python
all_parameters = {sn: get_sensor_parameters(sensor) for sn, sensor in sensors.items()}
for sn, parameters in all_parameters.items():
    print(f"Image: {sn}")
    for param in parameters:
        print(f" {param.name} | {param.index} | {param.value}")
```
%% Output
Image: MRO/CTX/1085197697:073
IT Pos. Bias | 0 | 0.0
CT Pos. Bias | 1 | 0.0
Rad Pos. Bias | 2 | 0.0
IT Vel. Bias | 3 | 0.0
CT Vel. Bias | 4 | 0.0
Rad Vel. Bias | 5 | 0.0
Omega Bias | 6 | 0.0
Phi Bias | 7 | 0.0
Kappa Bias | 8 | 0.0
Omega Rate | 9 | 0.0
Phi Rate | 10 | 0.0
Kappa Rate | 11 | 0.0
Omega Accl | 12 | 0.0
Phi Accl | 13 | 0.0
Kappa Accl | 14 | 0.0
Focal Bias | 15 | 0.0
Image: MRO/CTX/1157902986:250
IT Pos. Bias | 0 | 0.0
CT Pos. Bias | 1 | 0.0
Rad Pos. Bias | 2 | 0.0
IT Vel. Bias | 3 | 0.0
CT Vel. Bias | 4 | 0.0
Rad Vel. Bias | 5 | 0.0
Omega Bias | 6 | 0.0
Phi Bias | 7 | 0.0
Kappa Bias | 8 | 0.0
Omega Rate | 9 | 0.0
Phi Rate | 10 | 0.0
Kappa Rate | 11 | 0.0
Omega Accl | 12 | 0.0
Phi Accl | 13 | 0.0
Kappa Accl | 14 | 0.0
Focal Bias | 15 | 0.0
Image: MRO/CTX/1096561308:045
IT Pos. Bias | 0 | 0.0
CT Pos. Bias | 1 | 0.0
Rad Pos. Bias | 2 | 0.0
IT Vel. Bias | 3 | 0.0
CT Vel. Bias | 4 | 0.0
Rad Vel. Bias | 5 | 0.0
Omega Bias | 6 | 0.0
Phi Bias | 7 | 0.0
Kappa Bias | 8 | 0.0
Omega Rate | 9 | 0.0
Phi Rate | 10 | 0.0
Kappa Rate | 11 | 0.0
Omega Accl | 12 | 0.0
Phi Accl | 13 | 0.0
Kappa Accl | 14 | 0.0
Focal Bias | 15 | 0.0
Image: MRO/CTX/1136952576:186
IT Pos. Bias | 0 | 0.0
CT Pos. Bias | 1 | 0.0
Rad Pos. Bias | 2 | 0.0
IT Vel. Bias | 3 | 0.0
CT Vel. Bias | 4 | 0.0
Rad Vel. Bias | 5 | 0.0
Omega Bias | 6 | 0.0
Phi Bias | 7 | 0.0
Kappa Bias | 8 | 0.0
Omega Rate | 9 | 0.0
Phi Rate | 10 | 0.0
Kappa Rate | 11 | 0.0
Omega Accl | 12 | 0.0
Phi Accl | 13 | 0.0
Kappa Accl | 14 | 0.0
Focal Bias | 15 | 0.0
%% Cell type:code id: tags:
``` python
# Solve for angles and angular rates
solve_parameters = {sn: params[6:12] for sn, params in all_parameters.items()}
for sn, parameters in solve_parameters.items():
    print(f"Image: {sn}")
    for param in parameters:
        print(f" {param.name} | {param.index} | {param.value}")
```
%% Output
Image: MRO/CTX/1085197697:073
Omega Bias | 6 | 0.0
Phi Bias | 7 | 0.0
Kappa Bias | 8 | 0.0
Omega Rate | 9 | 0.0
Phi Rate | 10 | 0.0
Kappa Rate | 11 | 0.0
Image: MRO/CTX/1157902986:250
Omega Bias | 6 | 0.0
Phi Bias | 7 | 0.0
Kappa Bias | 8 | 0.0
Omega Rate | 9 | 0.0
Phi Rate | 10 | 0.0
Kappa Rate | 11 | 0.0
Image: MRO/CTX/1096561308:045
Omega Bias | 6 | 0.0
Phi Bias | 7 | 0.0
Kappa Bias | 8 | 0.0
Omega Rate | 9 | 0.0
Phi Rate | 10 | 0.0
Kappa Rate | 11 | 0.0
Image: MRO/CTX/1136952576:186
Omega Bias | 6 | 0.0
Phi Bias | 7 | 0.0
Kappa Bias | 8 | 0.0
Omega Rate | 9 | 0.0
Phi Rate | 10 | 0.0
Kappa Rate | 11 | 0.0
%% Cell type:code id: tags:
``` python
column_dict = compute_coefficient_columns(cnet, sensors, solve_parameters)
```
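%% Cell type:markdown id: tags:
The Jacobian, weight matrices, and correction vectors below all share the column layout defined by `column_dict`: each entry appears to map a serial number or point id to a `(start, stop)` column range, with the sensor-parameter blocks packed before the 3-column ground-point blocks. A minimal, illustrative peek at that layout (assuming the `(start, stop)` tuple convention used later in this notebook; not part of the original run):
``` python
# Print a few column ranges to see how sensor and point parameters are packed.
for key, (start, stop) in list(column_dict.items())[:5]:
    print(f"{key}: columns {start}-{stop - 1}")
```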
%% Cell type:code id: tags:
``` python
num_observations = 2 * len(cnet)
W_observations = np.eye(num_observations) # placeholder: identity observation weights until the real measurement-weight calculation is added
W_params = compute_parameter_weights(cnet, sensors, solve_parameters, column_dict)
dof = W_observations.shape[0] - W_params.shape[0]
```
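%% Cell type:markdown id: tags:
The reference variance reported at each iteration follows the standard weighted least-squares form, matching the expression noted in the dense solve below:
$$\sigma_0 = \sqrt{\frac{V^\top W_{\mathrm{obs}}\, V + \Delta X^\top W_{\mathrm{params}}\, \Delta X}{\mathrm{dof}}}$$
where $V$ is the residual vector, $\Delta X$ is the most recent correction, and $\mathrm{dof}$ is the number of observations minus the number of parameters, as computed above.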
%% Cell type:code id: tags:
``` python
max_iterations = 10
tol = 1e-10
```
%% Cell type:code id: tags:
``` python
num_sensor_params = sum([len(parameters) for parameters in solve_parameters.values()])
num_point_params = 3 * len(cnet["id"].unique())
```
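%% Cell type:markdown id: tags:
With the 4 images above and 6 solve parameters per image, this works out to $n_{\mathrm{sensor}} = 4 \times 6 = 24$ sensor columns plus $3$ columns per unique control point; these counts size the sparse weight and normal-equation blocks in the reduced solve further down.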
%% Cell type:code id: tags:
``` python
sensors = generate_sensors(cubes) # generate sensors
cnet = io_controlnetwork.from_isis(network) # load in network
cnet = compute_apriori_ground_points(cnet, sensors) # calculate ground points
column_dict = compute_coefficient_columns(cnet, sensors, solve_parameters)
num_parameters = max(col_range[1] for col_range in column_dict.values())
num_observations = 2 * len(cnet)
W_observations = np.eye(num_observations)
W_params = compute_parameter_weights(cnet, sensors, solve_parameters, column_dict)
iteration = 0
V = compute_residuals(cnet, sensors)
dX = np.zeros(W_params.shape[0]) # initialize for the sigma0 calculation
sigma0 = compute_sigma0(V, dX, W_params, W_observations)
print(f'iteration {iteration}: sigma0 = {sigma0}\n')
total_correction_dense = np.zeros(num_parameters)
for i in range(max_iterations):
    iteration += 1
    old_sigma0 = sigma0
    J = compute_jacobian(cnet, sensors, solve_parameters, column_dict)
    N = J.T.dot(W_observations).dot(J) + W_params # build the normal equations matrix
    C = J.T.dot(W_observations).dot(V) - W_params.dot(total_correction_dense)
    dX = np.linalg.inv(N).dot(C) # calculate the change in camera parameters and ground points
    total_correction_dense += dX
    print(f'corrections: mean = {dX.mean()} min = {dX.min()} max = {dX.max()}')
    update_parameters(sensors, solve_parameters, cnet, dX, column_dict)
    V = compute_residuals(cnet, sensors)
    sigma0 = compute_sigma0(V, dX, W_params, W_observations) # sqrt((V' W_obs V + dX' W_params dX) / dof)
    print(f'iteration {iteration}: sigma0 = {sigma0}\n')
    if abs(sigma0 - old_sigma0) < tol:
        print(f'change in sigma0 of {abs(sigma0 - old_sigma0)} converged!')
        break
```
%% Output
iteration 0: sigma0 = 4.204919720360203
corrections: mean = 0.05349160077573805 min = -14.251221291713048 max = 40.834990964537575
iteration 1: sigma0 = 1.3539063538494558
corrections: mean = 2.7124445740181957e-06 min = -0.011426521506164248 max = 0.011842165594603658
iteration 2: sigma0 = 1.127336617096085
corrections: mean = 4.600122255126198e-08 min = -1.8475863314383526e-06 max = 4.573929399245028e-06
iteration 3: sigma0 = 1.1273365324778626
corrections: mean = 1.879394505450981e-11 min = -1.283151025419733e-09 max = 1.8514518745707175e-09
iteration 4: sigma0 = 1.127336532470605
change in sigma0 of 7.257527911974648e-12 converged!
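%% Cell type:markdown id: tags:
The next cell solves the same normal equations but exploits their block structure instead of inverting the full dense system. Writing the system in camera ($C$) and point ($P$) blocks,
$$\begin{bmatrix} H_{CC} & H_{CP} \\ H_{PC} & H_{PP} \end{bmatrix} \begin{bmatrix} \Delta C \\ \Delta P \end{bmatrix} = \begin{bmatrix} g_C \\ g_P \end{bmatrix},$$
the point block is eliminated (a Schur complement reduction), leaving a reduced camera system and a cheap per-point back-substitution:
$$\left(H_{CC} - H_{CP} H_{PP}^{-1} H_{PC}\right) \Delta C = g_C - H_{CP} H_{PP}^{-1} g_P, \qquad \Delta P_i = H_{PP,i}^{-1}\left(g_{P,i} - H_{PC,i}\, \Delta C\right).$$
As read from the code, `Q_mats[point_id]` holds $H_{PP,i}^{-1} H_{PC,i}$ and `NIC_vecs[point_id]` holds $H_{PP,i}^{-1} g_{P,i}$.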
%% Cell type:code id: tags:
``` python
sensors = generate_sensors(cubes) # generate sensors
cnet = io_controlnetwork.from_isis(network) # load in network
cnet = compute_apriori_ground_points(cnet, sensors) # calculate ground points
# This is set up once per bundle and then accumulates
total_corrections = np.zeros(num_sensor_params + num_point_params)
# These are computed once per bundle
W_CC_sparse = sparse.csc_matrix((num_sensor_params, num_sensor_params))
W_PP = {}
# Compute image param weight matrices
for sn, params in solve_parameters.items():
    coeff_indices = column_dict[sn]
    coeff_range = np.arange(coeff_indices[0], coeff_indices[1])
    num_image_coeffs = coeff_indices[1] - coeff_indices[0]
    W_CC_data = compute_image_weight(sensors[sn], params).ravel()
    W_CC_row = np.repeat(coeff_range, num_image_coeffs)
    W_CC_column = np.tile(coeff_range, num_image_coeffs)
    W_CC_sparse += sparse.coo_matrix((W_CC_data, (W_CC_row, W_CC_column)), (num_sensor_params, num_sensor_params)).tocsc()
# Compute point param weight matrices
for point_id in cnet['id'].unique():
    W_PP[point_id] = compute_point_weight(cnet, point_id)
V = compute_residuals(cnet, sensors)
sigma0 = compute_sigma0_sparse(V, np.zeros(total_corrections.shape), W_CC_sparse, W_PP, W_observations, column_dict)
# Start iteration logic
for i in range(max_iterations):
    old_sigma0 = sigma0
    H_CC_sparse = sparse.csc_matrix((num_sensor_params, num_sensor_params))
    H_CC_sparse += W_CC_sparse
    g_C_sparse = np.zeros(num_sensor_params)
    g_C_sparse -= W_CC_sparse.dot(total_corrections[:num_sensor_params])
    # g_C_sparse += W_CC_sparse.dot(sensor_corrections)
    # Q = H_PP^-1 * H_PC
    Q_mats = {}
    # NIC = H_PP^-1 * g_P
    NIC_vecs = {}
    updates = np.zeros(num_sensor_params + num_point_params)
    for point_id, group in cnet.groupby('id'):
        ground_pt = group.iloc[0][["adjustedX", "adjustedY", "adjustedZ"]]
        H_CP = sparse.csc_matrix((num_sensor_params, 3))
        H_PP = np.zeros((3, 3))
        g_P = np.zeros(3)
        for measure_idx, row in group.iterrows():
            serial = row["serialnumber"]
            sensor = sensors[serial]
            point_partials = compute_ground_partials(sensor, ground_pt)
            sensor_partials = compute_sensor_partials(sensor, solve_parameters[serial], ground_pt)
            coeff_indices = column_dict[serial]
            coeff_range = np.arange(coeff_indices[0], coeff_indices[1])
            num_image_coeffs = coeff_indices[1] - coeff_indices[0]
            H_CC_point_data = np.dot(sensor_partials.T, sensor_partials).ravel()
            H_CC_point_row = np.repeat(coeff_range, num_image_coeffs)
            H_CC_point_column = np.tile(coeff_range, num_image_coeffs)
            H_CC_sparse += sparse.coo_matrix((H_CC_point_data, (H_CC_point_row, H_CC_point_column)), (num_sensor_params, num_sensor_params)).tocsc()
            H_CP_point_data = np.dot(sensor_partials.T, point_partials).ravel()
            H_CP_point_row = np.repeat(coeff_range, 3)
            H_CP_point_column = np.tile(np.arange(0, 3), num_image_coeffs)
            H_CP += sparse.coo_matrix((H_CP_point_data, (H_CP_point_row, H_CP_point_column)), (num_sensor_params, 3)).tocsc()
            H_PP += np.dot(point_partials.T, point_partials)
            g_C_sparse[coeff_indices[0]:coeff_indices[1]] += np.dot(sensor_partials.T, V[2*measure_idx:2*measure_idx+2])
            g_P += np.dot(point_partials.T, V[2*measure_idx:2*measure_idx+2])
        point_param_range = column_dict[point_id]
        g_P -= W_PP[point_id].dot(total_corrections[point_param_range[0]:point_param_range[1]])
        H_PP += W_PP[point_id]
        H_PP_inv = sparse.csc_matrix(np.linalg.inv(H_PP))
        Q_mats[point_id] = H_PP_inv.dot(H_CP.transpose())
        NIC_vecs[point_id] = H_PP_inv.dot(g_P)
        H_CC_sparse -= H_CP.dot(Q_mats[point_id])
        g_C_sparse -= H_CP.dot(NIC_vecs[point_id])
    updates[:num_sensor_params] = sparse.linalg.spsolve(H_CC_sparse, g_C_sparse)
    for point_id in Q_mats:
        point_param_indices = column_dict[point_id]
        updates[point_param_indices[0]:point_param_indices[1]] = NIC_vecs[point_id] - Q_mats[point_id].dot(updates[:num_sensor_params])
    print(f'corrections: mean = {updates.mean()} min = {updates.min()} max = {updates.max()}')
    total_corrections += updates
    update_parameters(sensors, solve_parameters, cnet, updates, column_dict)
    V = compute_residuals(cnet, sensors)
    sigma0 = compute_sigma0_sparse(V, updates, W_CC_sparse, W_PP, W_observations, column_dict)
    print(f'iteration {i+1}: sigma0 = {sigma0}\n')
    if abs(sigma0 - old_sigma0) < tol:
        print(f'change in sigma0 of {abs(sigma0 - old_sigma0)} converged!')
        break
```
%% Output
corrections: mean = 0.05349160077573873 min = -14.251221291713076 max = 40.834990964537546
iteration 1: sigma0 = 1.353906353849451
corrections: mean = 2.712443886869334e-06 min = -0.011426521492657233 max = 0.011842165579696888
iteration 2: sigma0 = 1.1273366170980164
corrections: mean = 4.6016402655676494e-08 min = -1.8475512374681445e-06 max = 4.5737885812869825e-06
iteration 3: sigma0 = 1.1273365324789022
corrections: mean = 3.7306738076770124e-12 min = -1.429027500961002e-09 max = 1.6954805056745274e-09
iteration 4: sigma0 = 1.1273365324578994
change in sigma0 of 2.100275509064886e-11 converged!
%% Cell type:code id: tags:
``` python
print("Sensor diff")
print(np.min(total_correction_dense[:num_sensor_params].flatten() - total_corrections[:num_sensor_params]))
print(np.max(total_correction_dense[:num_sensor_params].flatten() - total_corrections[:num_sensor_params]))
print("Point diff")
print(np.min(total_correction_dense[num_sensor_params:].flatten() - total_corrections[num_sensor_params:]))
print(np.max(total_correction_dense[num_sensor_params:].flatten() - total_corrections[num_sensor_params:]))
```
%% Output
Sensor diff
-1.450808806424675e-09
1.2130136894938914e-09
Point diff
-2.297802850770303e-10
4.665956510052638e-10
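%% Cell type:markdown id: tags:
The dense and Schur-reduced solutions agree to within roughly $10^{-9}$ in both the sensor and point corrections. An equivalent programmatic check might look like this (illustrative sketch, with a tolerance chosen loosely above the observed differences; not part of the original run):
``` python
# Confirm both solvers converged to the same corrections to within 1e-8.
assert np.allclose(total_correction_dense.flatten(), total_corrections, atol=1e-8)
```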
%% Cell type:code id: tags:
``` python
```
@@ -435,7 +435,7 @@ def update_parameters(sensors, parameters, network, updates, coefficient_columns
     for sn, sensor in sensors.items():
         for i, param in enumerate(parameters[sn]):
             if i > coefficient_columns[sn][1]:
-                print('THIS SHOULD BE AN ACTUAL ERROR')
+                raise IndexError(f'parameter [{param.name}] at index [{i}] is beyond the max index [{coefficient_columns[sn][1]}].')
             current_value = sensor.getParameterValue(param.index)
             sensor.setParameterValue(param.index, current_value+updates[coefficient_columns[sn][0]+i])
@@ -446,7 +446,7 @@ def update_parameters(sensors, parameters, network, updates, coefficient_columns
         adj = updates[coefficient_columns[point_id][0]:coefficient_columns[point_id][1]]
         network.loc[network.id == point_id, ["adjustedX", "adjustedY", "adjustedZ"]] = ground_pt + adj

-def compute_sigma(V, dX, W_parameters, W_observations):
+def compute_sigma0(V, dX, W_parameters, W_observations):
     """
     Computes the resulting standard deviation of the residuals for the current state of the bundle network.
@@ -475,7 +475,7 @@ def compute_sigma(V, dX, W_parameters, W_observations):
     sigma0 = np.sqrt(VTPV/dof)
     return sigma0

-def compute_sigma_sparse(V, dX, W_sensors, W_points, W_observations, column_dict):
+def compute_sigma0_sparse(V, dX, W_sensors, W_points, W_observations, column_dict):
     """
     Computes the resulting standard deviation of the residuals for the current state of the bundle network.
@@ -184,7 +184,7 @@ def test_compute_sigma0():
                       [0, 0, 0, -7, -8, -9]]
     )
     dX = np.arange(-6, 0)
-    assert bundle.compute_sigma(V, dX, W_params, W_obs) == np.sqrt(7809 / 10)
+    assert bundle.compute_sigma0(V, dX, W_params, W_obs) == np.sqrt(7809 / 10)

 def test_compute_sigma0_sparse():
     V = np.arange(0, 16) + 1
@@ -198,7 +198,7 @@ def test_compute_sigma0_sparse():
         "image_1" : (0, 3),
         "point_1" : (3, 6)
     }
-    assert bundle.compute_sigma_sparse(V, dX, W_sensors, W_points, W_obs, column_dict) == np.sqrt(7809 / 10)
+    assert bundle.compute_sigma0_sparse(V, dX, W_sensors, W_points, W_obs, column_dict) == np.sqrt(7809 / 10)

 def test_compute_image_weight():