diff --git a/demeter/ncdf_conversion.py b/demeter/ncdf_conversion.py
index bfa53d8..77fe146 100644
--- a/demeter/ncdf_conversion.py
+++ b/demeter/ncdf_conversion.py
@@ -259,17 +259,3 @@ def process_output(self,
 
         return ds
 
-if __name__ == "__main__":
-
-    input_file_directory = "/Users/d3y010/Downloads"
-    output_file_directory = "/Users/d3y010/Desktop"
-    target_year = 2005
-
-    # instantiate class
-    x = DemeterToNetcdf(scenario_name="rcp85cooler",
-                        project_name="im3")
-
-    # convert demeter output to a NetCDF file
-    x.process_output(input_file_directory=input_file_directory,
-                     output_file_directory=output_file_directory,
-                     target_year=target_year)
diff --git a/demeter/process.py b/demeter/process.py
index db81017..40a67af 100644
--- a/demeter/process.py
+++ b/demeter/process.py
@@ -58,8 +58,7 @@ def prep_step(self):
                                 self.s.lat, self.s.lon, self.step, self.s.kernel_vector, self.s.weights,
                                 self.s.spat_ludataharm)
 
-        # create transition array to store data
-        #self.transitions = np.zeros(shape=(self.l_spat_region, self.l_order_rules, self.l_order_rules))
+
 
     def intense_pass(self, pass_num):
         """Conduct the first pass of intensification."""
@@ -128,12 +127,7 @@ def outputs(self):
         # convert land cover from sqkm per grid cell per land class to fraction (n_grids, n_landclasses)
         fraction_lu = self.s.spat_ludataharm / np.tile(self.s.cellarea * self.s.celltrunk, (self.l_fcs, 1)).T
 
-        # optionally save land cover transitions as a CSV
-        #if (self.config.save_transitions == 1) and (self.step in self.config.target_years_output):
-
-        #    self.config.logger.info("Saving land cover transition files for time step {0}...".format(self.step))
-        #    wdr.write_transitions(self.s, self.config, self.step, self.transitions)
 
         # create a NetCDF file of land cover fraction for each year by grid cell containing each land class
@@ -151,7 +145,7 @@ def outputs(self):
         write_ncdf =True
         return wdr.lc_timestep_csv(self.config, self.step, self.s.final_landclasses, self.s.spat_coords, orig_spat_aez,
                                    self.s.spat_region, self.s.spat_water, self.s.cellarea, self.s.spat_ludataharm,
-                                   self.config.metric, self.config.tabular_units, self.write_outputs,write_ncdf,self.sce,self.res,write_csv,self.regrid_res)
+                                   self.config.metric, self.config.tabular_units, self.write_outputs, write_ncdf, self.sce, self.res, write_csv, self.regrid_res)
 
     def process(self):
         """
diff --git a/demeter/weight/kernel_density.py b/demeter/weight/kernel_density.py
index 4926f1d..2d28d4d 100644
--- a/demeter/weight/kernel_density.py
+++ b/demeter/weight/kernel_density.py
@@ -18,19 +18,30 @@ def handle_single_pft(pft_order, order_rules, final_landclasses, pft_maps, cellindexresin,
-                      spat_ludataharm, kernel_maps, kernel_vector,weights):
-    """
-    Helper function to handle pft convultion filters. This is used to parallelize the convultion filter operation to speed up processing.
+                      spat_ludataharm, kernel_maps, kernel_vector, weights):
+    """Helper function that applies the convolution filter for a single PFT. It is passed to
+    ``apply_convolution`` so the filter operation can be parallelized to speed up processing.
 
-    """
+    Parameters:
+        pft_order (int): Order value of the PFT to process
+        order_rules (np.ndarray): The order rules defined by the user
+        final_landclasses (np.ndarray): Final land class names
+        pft_maps (np.ndarray): 3D array of PFT layers on the spatial grid
+        cellindexresin (np.ndarray): Row and column grid indices at the input resolution
+        spat_ludataharm (np.ndarray): Harmonized land use data per grid cell and land class
+        kernel_maps (np.ndarray): 3D array receiving the filtered layer for each PFT
+        kernel_vector (np.ndarray): 2D array receiving the filtered values per grid cell
+        weights (np.ndarray): Convolution kernel weights
+
+    Returns:
+        list: Processed convolution filters for each land type (LT).
+
+    """
 
     pft = np.where(order_rules == pft_order)[0][0]
 
     # get final land class name
     flc = final_landclasses[pft]
-    # print(pft)
+
     # populate pft_maps array with base land use layer data
     pft_maps[np.int_(cellindexresin[0, :]), np.int_(cellindexresin[1, :]), pft] = spat_ludataharm[:, pft]
-    # print(pft_maps.shape)
+
     # apply image filter
     kernel_maps[:, :, pft] = ndimage.filters.convolve(pft_maps[:, :, pft], weights, output=None, mode='wrap')
 
@@ -40,8 +51,8 @@ def handle_single_pft(pft_order, order_rules, final_landclasses, pft_maps, celli
 
     min_seed = 0.0000000001
 
     kernel_maps[:, :, pft][
-        kernel_maps[:, :, pft] == 0] = min_seed  # np.nanmin(kernel_maps[:, :, pft], [kernel_maps[:, :, pft] > 0])
-    # print(kernel_maps.shape)
+        kernel_maps[:, :, pft] == 0] = min_seed  # np.nanmin(kernel_maps[:, :, pft], [kernel_maps[:, :, pft] > 0])
+
     # reshaping to the spatial grid-cell data (vector)
     kernel_vector[:, pft] = kernel_maps[np.int_(cellindexresin[0, :]), np.int_(cellindexresin[1, :]), pft]
 
@@ -184,13 +195,12 @@ def apply_convolution(self, cellindexresin, pft_maps, kernel_maps, lat, lon, yr,
         order_rules= self.order_rules
         final_landclasses= self.final_landclasses
 
-
-
-        pool.starmap(handle_single_pft, zip(aux_val,repeat(order_rules),repeat(final_landclasses),
+        pool.starmap(handle_single_pft, zip(aux_val, repeat(order_rules), repeat(final_landclasses),
                                             repeat(pft_maps),repeat(cellindexresin), repeat(spat_ludataharm),
                                             repeat(kernel_maps), repeat(kernel_vector),repeat(weights)))
         pool.terminate()
+
         return kernel_vector
diff --git a/processing_scripts/convert_csv_col_to_ncdf_subdata.py b/processing_scripts/convert_csv_col_to_ncdf_subdata.py
deleted file mode 100644
index 7de4f05..0000000
--- a/processing_scripts/convert_csv_col_to_ncdf_subdata.py
+++ /dev/null
@@ -1,97 +0,0 @@
-
-import xarray as x
-import pandas as pd
-import numpy as np
-import os.path
-import netCDF4
-import rioxarray
-import sys
-from datetime import date
-
-#Define compression parameters here
-comp = dict(zlib=True, complevel=5, dtype="float32")
-
-#Set grid resolution
-GRID_RESOLUTION = 0.05
-
-#Define scenario
-sce= "rcp85cooler_ssp3"
-
-#Define start and end years and interval
-start_year=2005
-end_year=2100
-interval= 5
-
-#Define the bounds we want to generate the ncdf4
-xmin=-179.975
-xmax=180
-ymin=-90.025
-ymax=90
-
-#Define path to results and base layer
-path_to_results= "C:/Projects/IM3/im3_demeter_clm/outputs/rcp85cooler_ssp3_2022-10-06_10h51m16s/spatial_landcover_tabular/"
-path_to_layer = "C:/Projects/im3_demeter_clm/inputs/observed/baselayerdata_region_basin_0.05deg.csv"
-
-#Set year of analysis
-years =[]
-for k in range(start_year,end_year+interval,interval):
-    years.append(k)
-
-# create evenly-spaced coordinate pairs grid on the basis of lat lon values
-lon1 = np.arange(-179.975, 180, GRID_RESOLUTION)
-lon1 = np.round(lon1,3)
-lat1 = np.arange(-90.025, 90, GRID_RESOLUTION)
-lat1 = np.round(lat1,3)
-
-
-#Start year processing
-for j in years:
-    #Read in US outputs
-    lu_file = pd.read_csv(path_to_results+"landcover"+"_"+str(j)+"_timestep.csv", index_col=False)
-    lu_file_for_col = lu_file.drop(['latitude','longitude'],axis=1)
-
-    columns = lu_file_for_col.columns
-
-
-    for i in columns:
-
-        print("Start processing: "+str(i)+" in year "+str(j))
-        temp_lu_file = lu_file[['latitude','longitude',i]]
-
-        temp_lu_file.latitude=temp_lu_file.latitude.round(3)
-        temp_lu_file.longitude = temp_lu_file.longitude.round(3)
-        #print(temp_lu_file['latitude'])
-        df_cut = temp_lu_file[temp_lu_file['longitude'].isin(lon1)]
-        if(len(df_cut.index) != len(temp_lu_file.index)):
-            sys.exit("Longitudes dont match up. Please check input params for xmin, xmax, ymin, ymax!")
-        df_cut = temp_lu_file[temp_lu_file['latitude'].isin(lat1)]
-        if (len(df_cut.index) != len(temp_lu_file.index)):
-            sys.exit("Latitudes dont match up. Please check input params for xmin, xmax, ymin, ymax!")
-        temp_lu_file =temp_lu_file.rename(columns={'latitude':'lat','longitude':'lon'})
-        temp_lu_file = temp_lu_file.set_index(['lat', 'lon'])
-
-
-        xr = temp_lu_file.to_xarray()
-        xr = xr.sortby(['lat','lon'])
-        xr = xr.reindex(lat=lat1,lon=lon1)
-        xr.rio.write_crs("epsg:4326", inplace=True)
-
-        #Define encoding here
-        encoding = {str(i): comp}
-        #Check if file exists. If it does, append to it else create a new file
-        file_exists = os.path.exists("C:/Projects/ncdf_outputs/im3_demeter_us_"+str(sce)+"_"+str(j)+".nc")
-        if file_exists:
-            xr.to_netcdf("C:/Projects/ncdf_outputs/im3_demeter_us_"+str(sce)+"_"+str(j)+".nc", mode="a",encoding=encoding,engine='netcdf4',format='NETCDF4')
-        else:
-            #Add metadata
-            xr.attrs['scenario_name'] = str(sce)
-            xr.attrs['datum'] = "WGS84"
-            xr.attrs['coordinate_reference_system'] = "EPSG : 4326"
-            xr.attrs['demeter_version'] = "1.31"
-            xr.attrs['creation_date'] = str(date.today())
-            xr.attrs['other_info'] = "Includes 32 land types from CLM, water fraction and GCAM region name and basin ID"
-            #ADD Citation of GCAM version
-            #Add cittaion of demeter
-
-            xr.to_netcdf("C:/Projects/ncdf_outputs/im3_demeter_us_"+str(sce)+"_"+str(j)+".nc",encoding=encoding,engine='netcdf4',format='NETCDF4')
-
diff --git a/setup.py b/setup.py
index da8eca2..74d52e8 100644
--- a/setup.py
+++ b/setup.py
@@ -19,11 +19,11 @@ def readme():
     description='A land use land cover change disaggregation model',
     long_description=readme(),
     long_description_content_type="text/markdown",
-    install_requires=['configobj~=5.0.6',
-                      'numpy~=1.20.3',
-                      'pandas~=1.2.4',
-                      'scipy~=1.6.3',
-                      'requests~=2.20.0',
+    install_requires=['configobj>=5.0.6',
+                      'numpy>=1.20.3',
+                      'pandas>=1.2.4',
+                      'scipy>=1.6.3',
+                      'requests>=2.20.0',
                       'gcamreader>=1.2.5',
                       'xarray >= 0.20.2'],
     include_package_data=True
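
Two notes on the changes above:

On `setup.py`: switching the pins from `~=` to `>=` relaxes the upper bound. Under PEP 440, `numpy~=1.20.3` means `>=1.20.3, ==1.20.*` (i.e., `<1.21`), while `numpy>=1.20.3` accepts any newer release.

On `kernel_density.py`: the `apply_convolution` change fans `handle_single_pft` out across a process pool with `pool.starmap` and `itertools.repeat`. Below is a minimal, self-contained sketch of that pattern. The names (`convolve_single_layer`, `grid_stack`, `n_layers`) and the 3x3 smoothing kernel are illustrative assumptions, not Demeter's real inputs, and the sketch collects return values rather than writing into preallocated arrays so it stays self-contained.

```python
# Sketch of the starmap/repeat fan-out pattern (toy setup, not Demeter code).
from itertools import repeat
from multiprocessing import Pool

import numpy as np
from scipy import ndimage


def convolve_single_layer(layer_index, grid_stack, weights):
    """Convolve one layer of the stack and return it with its index."""
    filtered = ndimage.convolve(grid_stack[:, :, layer_index], weights, mode="wrap")

    # seed exact zeros with a tiny value, mirroring the min_seed guard in handle_single_pft
    filtered[filtered == 0] = 1e-10

    return layer_index, filtered


if __name__ == "__main__":
    n_lat, n_lon, n_layers = 10, 20, 3                   # toy grid dimensions (assumption)
    grid_stack = np.random.rand(n_lat, n_lon, n_layers)  # stand-in for pft_maps
    weights = np.ones((3, 3)) / 9.0                      # stand-in 3x3 smoothing kernel

    # one task per layer; repeat() supplies the shared arguments to every task
    with Pool(processes=2) as pool:
        results = pool.starmap(
            convolve_single_layer,
            zip(range(n_layers), repeat(grid_stack), repeat(weights)),
        )

    # reassemble the filtered layers into a stack analogous to kernel_maps
    kernel_maps = np.empty_like(grid_stack)
    for layer_index, filtered in results:
        kernel_maps[:, :, layer_index] = filtered

    print(kernel_maps.shape)  # (10, 20, 3)
```

The sketch returns each filtered layer because `multiprocessing` workers receive copies of their arguments, so collecting results in the parent is the simplest way to make a standalone example produce output.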