Commit 4bd09d3f authored by Narayanarao Bhogapurapu

multi algorithm support

parent b0b7e207
@@ -5,50 +5,49 @@ Created on Thu Dec 27 14:40:29 2023
@author: Narayanarao
"""
import argparse
import os
import sys
import warnings
import numpy as np
import pandas as pd
import argparse,os,sys,warnings,time
import numpy as np, pandas as pd
from osgeo import gdal
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
from scipy.stats import linregress
from skimage.util.shape import view_as_blocks
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.interpolate import griddata
from tqdm import tqdm
from scipy.interpolate import interpn
from plotting import density_scatter, plot_data, rmse
from algo import arc_sinc,arc_sinc_vectorized, cal_
from rst_io import read_bin, write_tif, write_bin
from utils import blockshaped, unblockshaped, unpad, spatial_intp_lin
from args_in import rvog_inverse
# Configure environment and warnings
gdal.UseExceptions()
warnings.filterwarnings('ignore')
warnings.filterwarnings('error')
np.seterr(divide='ignore', invalid='ignore')
# Command-line argument parsing
parser = argparse.ArgumentParser(description="Generate canopy height map from InSAR coherence data.")
parser.add_argument("-c", "--correlationFile", dest="corFile", help="Correlation file [0,1]")
parser.add_argument("-l", "--cal_ht", dest="lidarFile", help="Calibration height file (e.g., LiDAR heights in meters)")
parser.add_argument("-ll", "--lower_limit", dest="htl", default=0, help="Lower limit of canopy height (m)", type=int)
parser.add_argument("-ul", "--upper_limit", dest="htg", default=0, help="Upper limit of canopy height (m)", type=int)
parser.add_argument("-w", "--window", dest="window_size", default=10, help="Window size", type=int)
parser.add_argument("-val", "--validation", dest="validation", default=0, help="Fraction to split for cross-validation", type=float)
from plotting import density_scatter,plot_data,rmse
from algo import arc_sinc,arc_sinc_,cal_
from rst_io import read_bin, write_tif, write_bin
from utils import blockshaped,unblockshaped,unpad,spatial_intp_lin
from args_in import rvog_inverse
##########################################################################
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--correlationFile", dest = "corFile", help="correlation file [0,1]")
parser.add_argument("-l", "--cal_ht", dest = "lidarFile", help="Calibration height file e.g. LiDAR heights in (m)")
parser.add_argument("-ll", "--lower_limit",dest ="htl", default = 0 ,help="lower limit of canopy height (m)", type=int)
parser.add_argument("-ul", "--upper_limit",dest = "htg", default = 40,help="upper limit of canopy height (m)", type=int)
parser.add_argument("-w", "--window",dest = "window_size", default = 10, help="Size", type=int)
parser.add_argument("-val", "--validation",dest = "validation", default = 0, help="fraction to split cross validation", type=float)
parser.add_argument("-al", "--algorithm",dest = "algo", default = 1, help="Algorithm Type", type=int)
parser.add_argument("-ol", "--overlap",dest = "window_overlap", default = 0, help="window overlap fraction", type=float)
args = parser.parse_args()
if __name__ == "__main__":
    try:
        rvog_inverse(args)
    except Exception as e:
        print(f"An error occurred: {e}")
        sys.exit(1)
    rvog_inverse(args)
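For orientation, the block below shows how the script might be driven programmatically once the parser above is defined. It is illustrative only: the file names and option values (including the algorithm code) are hypothetical; valid algorithm codes depend on rvog_inverse in args_in.
# Illustrative invocation only: file names and option values are hypothetical.
example_argv = [
    "-c", "coherence.tif",    # InSAR coherence, values in [0, 1]
    "-l", "lidar_chm.tif",    # calibration heights (m), e.g. a LiDAR CHM
    "-ll", "0", "-ul", "40",  # lower/upper canopy height limits (m)
    "-w", "10",               # window size (pixels)
    "-val", "0.2",            # hold out 20% of samples for cross-validation
    "-al", "2",               # algorithm selector introduced in this commit
    "-ol", "0.5",             # 50% window overlap
]
args = parser.parse_args(example_argv)
rvog_inverse(args)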
@@ -23,12 +23,22 @@ from tqdm import tqdm
from scipy.interpolate import interpn
from sklearn.metrics import mean_squared_error as mse
from concurrent.futures import ProcessPoolExecutor, as_completed
import concurrent.futures
import os
from concurrent.futures import ThreadPoolExecutor
from scipy.interpolate import PchipInterpolator
import gc
import tracemalloc
from utils import blockshaped,unblockshaped
# Precompute XX and YY
XX = np.linspace(0, np.pi, num=100, endpoint=True)
XX[0] = np.spacing(1) # Avoid division by zero
@@ -51,6 +61,38 @@ def arc_sinc_fast(x, c_param):
    # Clip final result to the range [0, inf), works for both 1D and 2D arrays
    return np.maximum(y, 0)
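Only the tail of arc_sinc_fast is visible in this hunk. As a reference for what the module-level XX/YY precomputation above enables, here is a minimal sketch that inverts y = sin(x)/x with numpy's linear interpolation. The name arc_sinc_lookup and the sketch-local tables XX_TAB/YY_TAB are hypothetical; this is not the elided body of arc_sinc_fast.
import numpy as np

# Sketch-local copy of the lookup table built at module level above.
XX_TAB = np.linspace(0, np.pi, num=100, endpoint=True)
XX_TAB[0] = np.spacing(1)                   # avoid 0/0 at the origin
YY_TAB = np.sin(XX_TAB) / XX_TAB
XX_TAB[0], YY_TAB[0], YY_TAB[-1] = 0, 1, 0  # restore exact endpoint values

def arc_sinc_lookup(x, c_param):
    x = np.clip(np.asarray(x, dtype=float), 0, 1)
    # np.interp needs increasing sample points, so feed it the reversed table.
    y = np.interp(x, YY_TAB[::-1], (XX_TAB * c_param)[::-1])
    return np.maximum(y, 0)

print(arc_sinc_lookup(0.5, 20.0))  # coherence 0.5, c_param 20 -> roughly 38 m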
#################################################
#################################################
def process_st_window(win, temp_cor, temp_lidar, args):
    parm = cal_(temp_cor, temp_lidar, args.htl, args.htg)
    mask = temp_lidar.copy()
    mask[~np.isnan(mask)] = 1
    # if np.all(np.array(parm) == 0):
    #     parm = parm_.copy()
    if np.all(mask == 0) or np.all(np.isnan(mask)):
        np.fill_diagonal(mask, 1)
        mask = np.flipud(mask)
        np.fill_diagonal(mask, 1)
        mask = np.nan_to_num(mask)
    s = np.full((args.window_size, args.window_size), parm[1]) * mask
    c = np.full((args.window_size, args.window_size), parm[2]) * mask
    rmse = np.full((args.window_size, args.window_size), parm[4])
    count = np.full((args.window_size, args.window_size),
                    np.count_nonzero(~np.isnan(temp_lidar)))
    gama = temp_cor / parm[1]
    ht = arc_sinc(gama, parm[2]) * mask
    return s, c, rmse, count, ht
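A minimal sketch of how process_st_window might be driven over full rasters, assuming non-overlapping window_size x window_size tiles. The invert_scene helper is a hypothetical name for illustration and is not the repository's parallel driver.
def invert_scene(cohArray, lidarArray, args):
    # Tile the scene into window_size x window_size blocks and invert each block
    # independently; partial edge blocks are skipped in this simplified sketch.
    ws = args.window_size
    rows, cols = cohArray.shape
    ht_map = np.full((rows, cols), np.nan)
    for i in range(0, rows - ws + 1, ws):
        for j in range(0, cols - ws + 1, ws):
            temp_cor = cohArray[i:i + ws, j:j + ws]
            temp_lidar = lidarArray[i:i + ws, j:j + ws]
            s, c, rmse_w, count, ht = process_st_window((i, j), temp_cor, temp_lidar, args)
            ht_map[i:i + ws, j:j + ws] = ht
    return ht_map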
#################################################
##################################################
def rmse(predictions, targets):
    return np.sqrt(((predictions - targets) ** 2).mean())
def arc_sinc_(x, c_param):
    # Get rid of extreme values by setting all values where x > 1 equal to 1, and x < 0 equal to 0
@@ -91,62 +133,39 @@ def arc_sinc_(x, c_param):
    # return y
    return y
def arc_sinc(x, c_param):
    x = np.clip(x, 0, 1)  # Ensure x is between 0 and 1
    XX = np.linspace(0, np.pi, num=100, endpoint=True)
    XX[0] = np.spacing(1)  # Avoid division by zero issues
    YY = np.sin(XX) / XX
    XX[0] = 0
    YY[0] = 1
    YY[-1] = 0
    XX = XX[::-1]
    YY = YY[::-1]
    interp_func = interpolate.interp1d(YY, XX * c_param, kind='slinear', bounds_error=False, fill_value=0)
    y = interp_func(x)
    return np.clip(y, 0, None)  # Ensure no negative heights
def arc_sinc_vectorized(x, c_param):
    # Ensure x and c_param are numpy arrays
    x = np.asarray(x)
    c_param = np.asarray(c_param)
    # Clip x to the range [0, 1]
    x = np.clip(x, 0, 1)
    # Create array of increments between 0 and pi of size pi/100
    XX = np.linspace(0, np.pi, num=100, endpoint=True)
    # Set the first value of XX to eps to avoid division by zero issues
    XX[0] = np.spacing(1)
    # Calculate sinc for XX and save it to YY
    YY = np.sin(XX) / XX
    # Reset the first value of XX to zero and the first value of YY to the corresponding output
    XX[0] = 0
    YY[0] = 1
    # Set the last value of YY to 0 to avoid NaN issues
    YY[-1] = 0
    # Flip XX and YY left to right so YY is monotonically increasing for interpolation
    XX = XX[::-1]
    YY = YY[::-1]
    # Initialize result array
    y = np.zeros_like(x)
    # Run the interpolation once per unique c_param value
    for unique_c in np.unique(c_param):
        # Interpolation function for the current unique c_param
        interp_func = interpolate.interp1d(YY, XX * unique_c, kind='slinear', bounds_error=False, fill_value=0)
        # Apply the interpolation where c_param matches this value
        mask = (c_param == unique_c)
        y[mask] = interp_func(x[mask])
    # Clip the result to ensure non-negative values
    return np.clip(y, 0, None)
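A quick self-check of the two inversion helpers above, assuming both are in scope: start from a known argument t, compute the coherence sin(t)/t, and verify that the inverse mapping returns approximately c_param * t (arc_sinc takes a scalar c_param, arc_sinc_vectorized a per-pixel array). The values below are illustrative.
import numpy as np

t = 1.2
c_param = 20.0
gamma = np.sin(t) / t  # forward model: coherence for argument t

print(arc_sinc(np.array([gamma]), c_param))                         # expect roughly c_param * t = 24
print(arc_sinc_vectorized(np.array([gamma]), np.array([c_param])))  # same result, per-pixel c_param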
def cal_(temp_cor, temp_gedi, htl, htg):
    try:
@@ -380,40 +399,3 @@ def process_batch(batch_futures, cohArray, lidarArray, initial_ws, htl, htg, par
    # Invoke garbage collection
    gc.collect()
    return results
## blocks (windows) are processed in parallel
# def dynamicWindow(cohArray, lidarArray, initial_ws, htl, htg):
#     rows, cols = cohArray.shape
#     c_parm = np.zeros((rows, cols))
#     s_parm = np.zeros((rows, cols))
#     rmse_parm = np.zeros((rows, cols))
#     count = np.zeros((rows, cols))
#     ht_ = np.zeros((rows, cols))
#     parm_ = [0, 0, 0, 0, 0]
#     num_workers = os.cpu_count() - 1
#     futures = []
#     with ProcessPoolExecutor(max_workers=num_workers) as executor:
#         for i in range(0, rows, initial_ws):
#             for j in range(0, cols, initial_ws):
#                 futures.append(executor.submit(process_block, i, j, cohArray, lidarArray, initial_ws, htl, htg, parm_))
#         # Initialize the progress bar with the total number of futures
#         with tqdm(total=len(futures)) as pbar:
#             completed_jobs = 0
#             for future in as_completed(futures):
#                 start_i, end_i, start_j, end_j, s_p, c_p, r_p, ht, cnt = future.result()
#                 s_parm[start_i:end_i, start_j:end_j] = s_p
#                 c_parm[start_i:end_i, start_j:end_j] = c_p
#                 rmse_parm[start_i:end_i, start_j:end_j] = r_p
#                 ht_[start_i:end_i, start_j:end_j] = ht
#                 count[start_i:end_i, start_j:end_j] = cnt
#                 completed_jobs += 1
#                 if completed_jobs % 100 == 0:  # Update every 100 jobs
#                     pbar.update(100)
#     return s_parm, c_parm, rmse_parm, ht_, count
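The commented-out dynamicWindow above documents the submit-and-stitch pattern used for parallel window processing: one future per block, with results written back by block indices. The sketch below reproduces that pattern with a dummy worker so it runs stand-alone; _dummy_block_worker and parallel_blocks are illustrative names, not the repository's process_block or process_batch.
import numpy as np
from concurrent.futures import ProcessPoolExecutor, as_completed

def _dummy_block_worker(i, j, block):
    # Stand-in for process_block: return block indices and a tile of the same shape.
    return i, i + block.shape[0], j, j + block.shape[1], np.full(block.shape, block.mean())

def parallel_blocks(arr, ws=10):
    rows, cols = arr.shape
    out = np.zeros((rows, cols))
    with ProcessPoolExecutor() as executor:
        futures = [executor.submit(_dummy_block_worker, i, j, arr[i:i + ws, j:j + ws])
                   for i in range(0, rows, ws) for j in range(0, cols, ws)]
        for future in as_completed(futures):
            i0, i1, j0, j1, tile = future.result()
            out[i0:i1, j0:j1] = tile
    return out

if __name__ == "__main__":
    print(parallel_blocks(np.random.rand(100, 100)).shape)  # (100, 100)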