diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a4a0331..f96a649 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,4 +1,4 @@
-name: ci
+name: Run tests
on: [push, pull_request]
diff --git a/.github/workflows/conda-build.yaml b/.github/workflows/conda-build.yaml
deleted file mode 100644
index 72434fb..0000000
--- a/.github/workflows/conda-build.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-name: build
-
-on: [push]
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - name: Install dependencies
- run: |
- cd $HOME
- wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
- chmod +x miniconda.sh
- bash miniconda.sh -b -p $HOME/miniconda
- export PATH="$HOME/miniconda/bin:$PATH"
- $HOME/miniconda/bin/conda install numpy scipy astropy xarray pip keyring --yes
- cd $GITHUB_WORKSPACE
- $HOME/miniconda/bin/python setup.py sdist
- - name: Publish to Anaconda
- env:
- ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }}
- if: startsWith(github.event.ref, 'refs/tags')
- run: |
- alias python=$HOME/miniconda/bin/python
- alias pip=$HOME/miniconda/bin/pip
- $HOME/miniconda/bin/pip install twine
- $HOME/miniconda/bin/conda install conda-build --yes
- $HOME/miniconda/bin/conda install anaconda-client --yes
- $HOME/miniconda/bin/conda config --add channels mavensdc
- $HOME/miniconda/bin/conda-build --python 3.6 --output-folder . .
- $HOME/miniconda/bin/conda convert -p linux-32 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p win-32 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p osx-64 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p win-64 linux-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main linux-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main osx-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main win-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main win-32/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main linux-32/*.tar.bz2
- rm -r linux-64
- rm -r linux-32
- rm -r osx-64
- rm -r win-64
- rm -r win-32
- $HOME/miniconda/bin/conda-build --python 3.7 --output-folder . .
- $HOME/miniconda/bin/conda convert -p linux-32 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p win-32 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p osx-64 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p win-64 linux-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main linux-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main osx-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main win-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main win-32/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main linux-32/*.tar.bz2
- rm -r linux-64
- rm -r linux-32
- rm -r osx-64
- rm -r win-64
- rm -r win-32
- $HOME/miniconda/bin/conda-build --python 3.8 --output-folder . .
- $HOME/miniconda/bin/conda convert -p linux-32 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p win-32 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p osx-64 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p win-64 linux-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main linux-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main osx-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main win-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main win-32/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main linux-32/*.tar.bz2
- rm -r linux-64
- rm -r linux-32
- rm -r osx-64
- rm -r win-64
- rm -r win-32
- $HOME/miniconda/bin/conda-build --python 3.9 --output-folder . .
- $HOME/miniconda/bin/conda convert -p linux-32 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p win-32 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p osx-64 linux-64/*.tar.bz2
- $HOME/miniconda/bin/conda convert -p win-64 linux-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main linux-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main osx-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main win-64/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main win-32/*.tar.bz2
- $HOME/miniconda/bin/anaconda upload --label main linux-32/*.tar.bz2
- rm -r linux-64
- rm -r linux-32
- rm -r osx-64
- rm -r win-64
- rm -r win-32
\ No newline at end of file
diff --git a/.github/workflows/pypi-build.yaml b/.github/workflows/pypi-build.yaml
index ad31360..971d01c 100644
--- a/.github/workflows/pypi-build.yaml
+++ b/.github/workflows/pypi-build.yaml
@@ -1,4 +1,4 @@
-name: build
+name: Build and publish
on: [push]
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 0754226..a7d7c21 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -26,4 +26,4 @@ sphinx:
# Optionally declare the Python requirements required to build your docs
python:
install:
- - requirements: doc/requirements.txt
\ No newline at end of file
+ - requirements: doc/requirements.txt
diff --git a/README.md b/README.md
index 6ecd486..e1b153f 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,5 @@
-[![image](https://coveralls.io/repos/github/MAVENSDC/cdflib/badge.svg?branch=master)](https://coveralls.io/github/MAVENSDC/cdflib?branch=master)
[![Actions Status](https://github.com/MAVENSDC/cdflib/workflows/ci/badge.svg)](https://github.com/MAVENSDC/cdflib/actions)
+[![codecov](https://codecov.io/gh/MAVENSDC/cdflib/branch/master/graph/badge.svg?token=IJ6moGc40e)](https://codecov.io/gh/MAVENSDC/cdflib)
[![DOI](https://zenodo.org/badge/102912691.svg)](https://zenodo.org/badge/latestdoi/102912691)
[![Documentation Status](https://readthedocs.org/projects/cdflib/badge/?version=latest)](https://cdflib.readthedocs.io/en/latest/?badge=latest)
diff --git a/cdflib/__init__.py b/cdflib/__init__.py
index 532b006..3c3c59b 100644
--- a/cdflib/__init__.py
+++ b/cdflib/__init__.py
@@ -2,20 +2,19 @@
from . import cdfread, cdfwrite
from .cdf_factory import CDF
-
from .epochs import CDFepoch as cdfepoch # noqa: F401
try:
# This is an optional dependency for astropy time conversions
from .epochs_astropy import CDFAstropy as cdfastropy
-except:
+except BaseException:
pass
try:
# Another optional dependency for XArray <-> cdf conversions
- from .xarray_to_cdf import xarray_to_cdf
from .cdf_to_xarray import cdf_to_xarray
-except:
+ from .xarray_to_cdf import xarray_to_cdf
+except BaseException:
pass
__all__ = ['CDF', 'xarray_to_cdf', 'cdf_to_xarray']
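
The hunk above reorders the optional imports and replaces the bare except with except BaseException, keeping astropy and xarray as optional extras whose absence must not break import cdflib. A minimal sketch of that guard pattern in isolation, assuming nothing beyond what the hunk shows (the flag name is illustrative; except ImportError is the narrower form, while the hunk deliberately keeps a broad catch):

    # Optional-dependency import guard: the package still imports when the extra is absent.
    try:
        import astropy  # optional extra for time conversions
        HAVE_ASTROPY = True
    except ImportError:  # the hunk above uses the broader BaseException
        HAVE_ASTROPY = False

    print("astropy available:", HAVE_ASTROPY)
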
diff --git a/cdflib/cdf_to_xarray.py b/cdflib/cdf_to_xarray.py
index a6decd0..8fcf233 100644
--- a/cdflib/cdf_to_xarray.py
+++ b/cdflib/cdf_to_xarray.py
@@ -97,11 +97,10 @@ def _convert_cdf_to_dicts(filename, to_datetime=False, to_unixtime=False):
cdf_info = cdf_file.cdf_info()
all_cdf_variables = cdf_info['rVariables'] + cdf_info['zVariables']
-
# Gather all Global Attributes
try:
gatt = cdf_file.globalattsget()
- except:
+ except BaseException:
gatt = {}
# Gather all information about the CDF file, and store in the below dictionaries
@@ -129,52 +128,63 @@ def _convert_cdf_to_dicts(filename, to_datetime=False, to_unixtime=False):
return gatt, variable_attributes, variable_data, variable_properties
-def _verify_depend_dimensions(dataset, dimension_number, primary_variable_name, coordinate_variable_name, primary_variable_properties):
+
+def _verify_depend_dimensions(dataset, dimension_number, primary_variable_name,
+ coordinate_variable_name, primary_variable_properties):
primary_data = np.array(dataset[primary_variable_name])
coordinate_data = np.array(dataset[coordinate_variable_name])
if len(primary_data.shape) != 0 and len(coordinate_data.shape) == 0:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
return False
if len(coordinate_data.shape) != 0 and len(primary_data.shape) == 0:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
return False
if len(coordinate_data.shape) > 2:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} has too many dimensions to be the DEPEND_{dimension_number} for variable {primary_variable_name}')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} has too many dimensions to be the DEPEND_{dimension_number} for variable {primary_variable_name}')
return False
if len(coordinate_data.shape) == 2:
if primary_data.shape[0] != coordinate_data.shape[0]:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the Epoch dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the Epoch dimensions do not match.')
return False
if primary_variable_properties["Rec_Vary"] and primary_variable_properties["Last_Rec"] > 0:
if len(primary_data.shape) <= dimension_number:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but {primary_variable_name} does not have that many dimensions')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but {primary_variable_name} does not have that many dimensions')
return False
if primary_data.shape[dimension_number] != coordinate_data.shape[-1]:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
return False
else:
- if len(primary_data.shape) <= dimension_number-1:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but {primary_variable_name} does not have that many dimensions')
+ if len(primary_data.shape) <= dimension_number - 1:
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but {primary_variable_name} does not have that many dimensions')
return False
- if primary_data.shape[dimension_number-1] != coordinate_data.shape[-1]:
+ if primary_data.shape[dimension_number - 1] != coordinate_data.shape[-1]:
# This is kind of a hack for now.
# DEPEND_1 can sometimes refer to the first dimension in a variable, and sometimes the second.
                # So we require that both the first and second dimensions fail to match the coordinate size
                # before we definitively reject it.
if len(primary_data.shape) > dimension_number and primary_data.shape[dimension_number] != coordinate_data.shape[-1]:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
return False
return True
+
def _discover_depend_variables(vardata, varatts, varprops):
    # This loops through the variable attributes to discover which variables are the coordinates of other variables.
# Unfortunately, there is no easy way to tell this by looking at the variable ITSELF,
@@ -193,6 +203,7 @@ def _discover_depend_variables(vardata, varatts, varprops):
return list(set(list_of_depend_vars))
+
def _discover_uncertainty_variables(varatts):
# This loops through the variable attributes to discover which variables are the labels of other variables
# Unfortunately, there is no easy way to tell this by looking at the label variable itself
@@ -223,9 +234,8 @@ def _discover_label_variables(varatts, all_variable_properties, all_variable_dat
continue
depend_var_name = varatts[v][label_dependency]
- if all_variable_properties[depend_var_name]["Dim_Sizes"] and \
- len(all_variable_properties[v]["Dim_Sizes"]) > 0 and \
- (all_variable_properties[depend_var_name]["Dim_Sizes"][0] == all_variable_properties[v]["Dim_Sizes"][int(lab[-1])-1]):
+ if all_variable_properties[depend_var_name]["Dim_Sizes"] and len(all_variable_properties[v]["Dim_Sizes"]) > 0 and (
+ all_variable_properties[depend_var_name]["Dim_Sizes"][0] == all_variable_properties[v]["Dim_Sizes"][int(lab[-1]) - 1]):
if all_variable_data[depend_var_name].size == 0:
continue
else:
@@ -242,6 +252,7 @@ def _discover_label_variables(varatts, all_variable_properties, all_variable_dat
return list_of_label_vars
+
def _convert_fillvals_to_nan(var_data, var_atts, var_properties):
if var_atts is None:
@@ -274,6 +285,7 @@ def _convert_fillvals_to_nan(var_data, var_atts, var_properties):
new_data = np.array(np.nan)
return new_data
+
def _determine_record_dimensions(var_name, var_atts, var_data, var_props, depend_variables,
all_variable_data, all_variable_properties, created_unlimited_dims):
'''
@@ -324,7 +336,8 @@ def _determine_record_dimensions(var_name, var_atts, var_data, var_props, depend
udim_found = True
'''
- # If none of the above, check if the length of this variable dimension matches a non-specific one that has already been created
+ # If none of the above, check if the length of this variable dimension
+ # matches a non-specific one that has already been created
for udim in created_unlimited_dims:
if len(var_data) == created_unlimited_dims[udim]:
return udim, False, False
@@ -350,13 +363,13 @@ def _determine_record_dimensions(var_name, var_atts, var_data, var_props, depend
return None, False, False
# If none of the above, check if the length of this variable dimension matches a non-specific one that has already been created
- #for udim in created_unlimited_dims:
+ # for udim in created_unlimited_dims:
# if len(var_data) == created_unlimited_dims[udim]:
# return udim, False, False
# If none of the above, create a new dimension variable
#new_udim_name = 'record' + str(len(created_unlimited_dims))
- #return new_udim_name, False, True
+ # return new_udim_name, False, True
else:
@@ -398,7 +411,7 @@ def _determine_dimension_names(var_name, var_atts, var_data, var_props, depend_v
depend_i_variable_data = np.array(all_variable_data[depend_i_variable_name])
if not record_name_found:
- dimension_number = i-1
+ dimension_number = i - 1
else:
dimension_number = i
@@ -411,7 +424,7 @@ def _determine_dimension_names(var_name, var_atts, var_data, var_props, depend_v
depend_i_variable_data.size != 0 and \
len(var_data.shape) > dimension_number and \
(depend_i_variable_data.shape[1] == var_data.shape[dimension_number]):
- return_list.append((depend_i_variable_name+"_dim", dim_size, True, False))
+ return_list.append((depend_i_variable_name + "_dim", dim_size, True, False))
continue
else:
print(f"Warning: Variable {var_name} listed DEPEND_{str(i)} as {depend_i_variable_name}"
@@ -419,13 +432,13 @@ def _determine_dimension_names(var_name, var_atts, var_data, var_props, depend_v
        # There may be occasions where there was no time-varying record identified, but the user intended for it
# to exist. Thus, all the DEPEND_X's are off by 1. We should still try to incorporate those.
- if 'DEPEND_' + str(i-1) in var_atts and not record_name_found:
- depend_i_variable_name = var_atts['DEPEND_' + str(i-1)]
+ if 'DEPEND_' + str(i - 1) in var_atts and not record_name_found:
+ depend_i_variable_name = var_atts['DEPEND_' + str(i - 1)]
if depend_i_variable_name in all_variable_properties:
depend_i_variable_data = np.array(all_variable_data[depend_i_variable_name])
if depend_i_variable_data.size != 0 and len(depend_i_variable_data.shape) == 1 and \
- len(var_data.shape) > i-1 and \
- (depend_i_variable_data.shape[0] == var_data.shape[i-1]):
+ len(var_data.shape) > i - 1 and \
+ (depend_i_variable_data.shape[0] == var_data.shape[i - 1]):
print(f"Warning: Variable {var_name} has no determined time-varying component, but "
f"{depend_i_variable_name} was determined to match closely with one of the dimensions."
f" It will be set automatically for convenience.")
@@ -433,15 +446,14 @@ def _determine_dimension_names(var_name, var_atts, var_data, var_props, depend_v
continue
elif len(depend_i_variable_data.shape) > 1 and \
depend_i_variable_data.size != 0 and \
- len(var_data.shape) > i-1 and \
- (depend_i_variable_data.shape[1] == var_data.shape[i-1]):
+ len(var_data.shape) > i - 1 and \
+ (depend_i_variable_data.shape[1] == var_data.shape[i - 1]):
print(f"Warning: Variable {var_name} has no determined time-varying component, but "
f"{depend_i_variable_name} was determined to match closely with one of the dimensions."
f" It will be set automatically for convenience.")
return_list.append((depend_i_variable_name + "_dim", dim_size, True, False))
continue
-
# Check if the variable is itself a dimension
if var_name in depend_variables:
if len(var_data.shape) == 2 and var_props["Last_Rec"] == 0:
@@ -471,11 +483,12 @@ def _determine_dimension_names(var_name, var_atts, var_data, var_props, depend_v
break
else:
# If none of the above, create a new non-specific dimension name
- return_list.append(('dim'+str(len(created_regular_dims)), dim_size, False, True))
+ return_list.append(('dim' + str(len(created_regular_dims)), dim_size, False, True))
created_regular_dims['dim' + str(len(created_regular_dims))] = dim_size
return return_list
+
def _reformat_variable_dims_and_data(var_dims, var_data):
if len(var_dims) > 0 and var_data is None:
var_data = np.array([])
@@ -493,6 +506,7 @@ def _reformat_variable_dims_and_data(var_dims, var_data):
return var_dims, var_data
+
def _generate_xarray_data_variables(all_variable_data, all_variable_attributes,
all_variable_properties, fillval_to_nan):
@@ -679,9 +693,8 @@ def cdf_to_xarray(filename, to_datetime=False, to_unixtime=False, fillval_to_nan
import xarray as xr
# Convert the CDF file into a series of dicts, so we don't need to keep reading the file
- global_attributes, all_variable_attributes, all_variable_data, all_variable_properties = _convert_cdf_to_dicts(filename,
- to_datetime=to_datetime,
- to_unixtime=to_unixtime)
+ global_attributes, all_variable_attributes, all_variable_data, all_variable_properties = _convert_cdf_to_dicts(
+ filename, to_datetime=to_datetime, to_unixtime=to_unixtime)
created_vars, depend_dimensions = _generate_xarray_data_variables(all_variable_data, all_variable_attributes,
all_variable_properties, fillval_to_nan)
@@ -699,12 +712,12 @@ def cdf_to_xarray(filename, to_datetime=False, to_unixtime=False, fillval_to_nan
if var_name in label_variables:
# If these are label variables, we'll deal with these later when the DEPEND variables come up
continue
- elif (var_name in depend_dimensions) or (var_name+'_dim' in depend_dimensions):
+ elif (var_name in depend_dimensions) or (var_name + '_dim' in depend_dimensions):
# If these are DEPEND variables, add them to the DataSet coordinates
created_coord_vars[var_name] = created_vars[var_name]
            # Check if these coordinate variables have associated labels
for lab in label_variables:
- if label_variables[lab] == var_name: # Found one!
+ if label_variables[lab] == var_name: # Found one!
if len(created_vars[lab].dims) == len(created_vars[var_name].dims):
if created_vars[lab].size != created_vars[var_name].size:
print(f"Warning, label variable {lab} does not match the expected dimension sizes of {var_name}")
diff --git a/cdflib/cdfread.py b/cdflib/cdfread.py
index fdc1782..1dc14b3 100644
--- a/cdflib/cdfread.py
+++ b/cdflib/cdfread.py
@@ -103,7 +103,6 @@ def __init__(self, path, validate=False, string_encoding='ascii'):
self._f.close()
self._f = self.file.open('rb')
-
if (self.cdfversion == 3):
cdr_info, foffs = self._read_cdr(8)
gdr_info = self._read_gdr(foffs)
@@ -627,7 +626,7 @@ def globalattsget(self, expand=False, to_np=True):
if (expand is False):
# This exists to get rid of extraneous numpy arrays
- if type(entryData) == np.ndarray:
+ if isinstance(entryData, np.ndarray):
if len(entryData) == 1:
entryData = entryData[0]
@@ -747,7 +746,7 @@ def _uncompress_file(self, path):
def _read_ccr(self, byte_loc):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), 'big')
- self._f.seek(byte_loc+12)
+ self._f.seek(byte_loc + 12)
cproffset = int.from_bytes(self._f.read(8), 'big')
data_start = byte_loc + 32
@@ -759,7 +758,7 @@ def _read_ccr(self, byte_loc):
def _read_ccr2(self, byte_loc):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), 'big')
- self._f.seek(byte_loc+8)
+ self._f.seek(byte_loc + 8)
cproffset = int.from_bytes(self._f.read(4), 'big')
data_start = byte_loc + 20
@@ -777,7 +776,7 @@ def _read_cpr(self, byte_loc):
def _read_cpr3(self, byte_loc):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), 'big')
- cpr = self._f.read(block_size-8)
+ cpr = self._f.read(block_size - 8)
cType = int.from_bytes(cpr[4:8], 'big')
cParams = int.from_bytes(cpr[16:20], 'big')
@@ -787,7 +786,7 @@ def _read_cpr3(self, byte_loc):
def _read_cpr2(self, byte_loc):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), 'big')
- cpr = self._f.read(block_size-4)
+ cpr = self._f.read(block_size - 4)
cType = int.from_bytes(cpr[4:8], 'big')
cParams = int.from_bytes(cpr[16:20], 'big')
@@ -926,7 +925,7 @@ def _read_cdr(self, byte_loc: int):
self._f.seek(0)
self._f.seek(byte_loc)
block_size = int.from_bytes(self._f.read(8), 'big')
- cdr = self._f.read(block_size-8)
+ cdr = self._f.read(block_size - 8)
foffs = self._f.tell()
# _ = int.from_bytes(cdr[0:4],'big') #Section Type
# gdroff = int.from_bytes(cdr[4:12], 'big') # GDR Location
@@ -977,7 +976,7 @@ def _read_cdr(self, byte_loc: int):
def _read_cdr2(self, byte_loc):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), 'big')
- cdr = self._f.read(block_size-4)
+ cdr = self._f.read(block_size - 4)
foffs = self._f.tell()
# gdroff = int.from_bytes(cdr[4:8], 'big') # GDR Location
@@ -1014,7 +1013,7 @@ def _read_cdr2(self, byte_loc):
def _read_gdr(self, byte_loc):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), 'big') # Block Size
- gdr = self._f.read(block_size-8)
+ gdr = self._f.read(block_size - 8)
first_rvariable = int.from_bytes(gdr[4:12], 'big', signed=True)
first_zvariable = int.from_bytes(gdr[12:20], 'big', signed=True)
@@ -1031,7 +1030,7 @@ def _read_gdr(self, byte_loc):
rdim_sizes = []
for x in range(0, num_rdim):
ioff = 76 + x * 4
- rdim_sizes.append(int.from_bytes(gdr[ioff:ioff+4], 'big',
+ rdim_sizes.append(int.from_bytes(gdr[ioff:ioff + 4], 'big',
signed=True))
gdr_info = {}
@@ -1051,7 +1050,7 @@ def _read_gdr(self, byte_loc):
def _read_gdr2(self, byte_loc):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), 'big') # Block Size
- gdr = self._f.read(block_size-4)
+ gdr = self._f.read(block_size - 4)
first_rvariable = int.from_bytes(gdr[4:8], 'big', signed=True)
first_zvariable = int.from_bytes(gdr[8:12], 'big', signed=True)
@@ -1064,7 +1063,7 @@ def _read_gdr2(self, byte_loc):
rdim_sizes = []
for x in range(0, num_rdim):
ioff = 56 + x * 4
- rdim_sizes.append(int.from_bytes(gdr[ioff:ioff+4], 'big',
+ rdim_sizes.append(int.from_bytes(gdr[ioff:ioff + 4], 'big',
signed=True))
gdr_info = {}
@@ -1104,7 +1103,7 @@ def _read_varatts(self, var_num, zVar, expand, to_np=True):
entryData = aedr_info['entry']
if (expand is False):
# This exists to get rid of extraneous numpy arrays
- if type(entryData) == np.array:
+ if isinstance(entryData, np.ndarray):
if len(entryData) == 1:
entryData = entryData[0]
return_dict[adr_info['name']] = entryData
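
The two hunks replacing type(...) comparisons with isinstance are behavior fixes, not just style: type(entryData) == np.array compared a type against the np.array function and could never be true, so the single-element unwrapping below it was dead code. A quick demonstration:

    import numpy as np

    arr = np.array([1.0])
    print(type(arr) == np.array)        # False: np.array is a function, not a type
    print(type(arr) == np.ndarray)      # True, but misses ndarray subclasses
    print(isinstance(arr, np.ndarray))  # True, and matches subclasses too
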
@@ -1270,7 +1269,7 @@ def _read_aedr_fast2(self, byte_loc):
next_aedr = int.from_bytes(self._f.read(4), 'big', signed=True)
# Variable number or global entry number
- self._f.seek(byte_loc+20, 0)
+ self._f.seek(byte_loc + 20, 0)
entry_num = int.from_bytes(self._f.read(4), 'big', signed=True)
return entry_num, next_aedr
@@ -1288,7 +1287,7 @@ def _read_aedr3(self, byte_loc, to_np=True):
"""
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), 'big')
- aedr = self._f.read(block_size-8)
+ aedr = self._f.read(block_size - 8)
next_aedr = int.from_bytes(aedr[4:12], 'big', signed=True)
data_type = int.from_bytes(aedr[16:20], 'big', signed=True)
@@ -1405,11 +1404,11 @@ def _read_vdr3(self, byte_loc):
num_dims = int.from_bytes(vdr[332:336], 'big', signed=True)
for x in range(0, num_dims):
ioff = 336 + 4 * x
- zdim_sizes.append(int.from_bytes(vdr[ioff:ioff+4], 'big',
+ zdim_sizes.append(int.from_bytes(vdr[ioff:ioff + 4], 'big',
signed=True))
coff = 336 + 4 * num_dims
for x in range(0, num_dims):
- dim_varys.append(int.from_bytes(vdr[coff+4*x:coff+4*x+4],
+ dim_varys.append(int.from_bytes(vdr[coff + 4 * x:coff + 4 * x + 4],
'big', signed=True))
adj = 0
# Check for "False" dimensions, and delete them
@@ -1425,7 +1424,7 @@ def _read_vdr3(self, byte_loc):
# rvariable
for x in range(0, self._rvariables_num_dims):
ioff = 332 + 4 * x
- dim_varys.append(int.from_bytes(vdr[ioff:ioff+4], 'big',
+ dim_varys.append(int.from_bytes(vdr[ioff:ioff + 4], 'big',
signed=True))
for x in range(0, self._rvariables_num_dims):
if (dim_varys[x] != 0):
@@ -1474,7 +1473,7 @@ def _read_vdr2(self, byte_loc):
toadd = 128
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), 'big')
- vdr = self._f.read(block_size-4)
+ vdr = self._f.read(block_size - 4)
# Type of internal record
section_type = int.from_bytes(vdr[0:4], 'big')
@@ -1490,29 +1489,29 @@ def _read_vdr2(self, byte_loc):
compression_bool = (flag_bits[29] == '1')
sparse = int.from_bytes(vdr[28:32], 'big', signed=True)
- num_elements = int.from_bytes(vdr[44+toadd:48+toadd], 'big', signed=True)
- var_num = int.from_bytes(vdr[48+toadd:52+toadd], 'big', signed=True)
- CPRorSPRoffset = int.from_bytes(vdr[52+toadd:56+toadd], 'big',
+ num_elements = int.from_bytes(vdr[44 + toadd:48 + toadd], 'big', signed=True)
+ var_num = int.from_bytes(vdr[48 + toadd:52 + toadd], 'big', signed=True)
+ CPRorSPRoffset = int.from_bytes(vdr[52 + toadd:56 + toadd], 'big',
signed=True)
- blocking_factor = int.from_bytes(vdr[56+toadd:60+toadd], 'big',
+ blocking_factor = int.from_bytes(vdr[56 + toadd:60 + toadd], 'big',
signed=True)
- name = str(vdr[60+toadd:124+toadd].decode(self.string_encoding))
+ name = str(vdr[60 + toadd:124 + toadd].decode(self.string_encoding))
name = name.replace('\x00', '')
zdim_sizes = []
dim_sizes = []
dim_varys = []
if (section_type == 8):
# zvariable
- num_dims = int.from_bytes(vdr[124+toadd:128+toadd], 'big',
+ num_dims = int.from_bytes(vdr[124 + toadd:128 + toadd], 'big',
signed=True)
for x in range(0, num_dims):
- xoff = 128 + toadd + 4*x
- zdim_sizes.append(int.from_bytes(vdr[xoff:xoff+4], 'big',
+ xoff = 128 + toadd + 4 * x
+ zdim_sizes.append(int.from_bytes(vdr[xoff:xoff + 4], 'big',
signed=True))
coff = 128 + toadd + 4 * num_dims
for x in range(0, num_dims):
icoff = coff + 4 * x
- if (int.from_bytes(vdr[icoff:icoff+4], 'big', signed=True) == 0):
+ if (int.from_bytes(vdr[icoff:icoff + 4], 'big', signed=True) == 0):
dim_varys.append(False)
else:
dim_varys.append(True)
@@ -1530,7 +1529,7 @@ def _read_vdr2(self, byte_loc):
# rvariable
for x in range(0, self._rvariables_num_dims):
ix = 124 + toadd + 4 * x
- if (int.from_bytes(vdr[ix:ix+4], 'big', signed=True) == 0):
+ if (int.from_bytes(vdr[ix:ix + 4], 'big', signed=True) == 0):
dim_varys.append(False)
else:
dim_varys.append(True)
@@ -1583,9 +1582,9 @@ def _read_vdr_fast(self, byte_loc):
return self._read_vdr_fast2(byte_loc)
def _read_vdr_fast3(self, byte_loc):
- self._f.seek(byte_loc+12, 0)
+ self._f.seek(byte_loc + 12, 0)
next_vdr = int.from_bytes(self._f.read(8), 'big', signed=True)
- self._f.seek(byte_loc+84, 0)
+ self._f.seek(byte_loc + 84, 0)
name = str(self._f.read(256).decode(self.string_encoding))
name = name.replace('\x00', '')
@@ -1598,9 +1597,9 @@ def _read_vdr_fast2(self, byte_loc):
else:
toadd = 128
- self._f.seek(byte_loc+8, 0)
+ self._f.seek(byte_loc + 8, 0)
next_vdr = int.from_bytes(self._f.read(4), 'big', signed=True)
- self._f.seek(byte_loc+toadd+64, 0)
+ self._f.seek(byte_loc + toadd + 64, 0)
name = str(self._f.read(64).decode(self.string_encoding))
name = name.replace('\x00', '')
@@ -1610,7 +1609,7 @@ def _read_vdr_fast2(self, byte_loc):
def _read_vxrs(self, byte_loc, vvr_offsets=[], vvr_start=[], vvr_end=[]):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(8), 'big', signed=True) # Block Size
- vxrs = self._f.read(block_size-8)
+ vxrs = self._f.read(block_size - 8)
next_vxr_pos = int.from_bytes(vxrs[4:12], 'big', signed=True)
num_ent = int.from_bytes(vxrs[12:16], 'big', signed=True)
@@ -1618,12 +1617,12 @@ def _read_vxrs(self, byte_loc, vvr_offsets=[], vvr_start=[], vvr_end=[]):
# coff = 20
for ix in range(0, num_ent_used):
soffset = 20 + 4 * ix
- num_start = int.from_bytes(vxrs[soffset:soffset+4], 'big',
+ num_start = int.from_bytes(vxrs[soffset:soffset + 4], 'big',
signed=True)
eoffset = 20 + 4 * num_ent + 4 * ix
- num_end = int.from_bytes(vxrs[eoffset:eoffset+4], 'big', signed=True)
+ num_end = int.from_bytes(vxrs[eoffset:eoffset + 4], 'big', signed=True)
ooffset = 20 + 2 * 4 * num_ent + 8 * ix
- rec_offset = int.from_bytes(vxrs[ooffset:ooffset+8], 'big',
+ rec_offset = int.from_bytes(vxrs[ooffset:ooffset + 8], 'big',
signed=True)
type_offset = 8 + rec_offset
self._f.seek(type_offset, 0)
@@ -1646,7 +1645,7 @@ def _read_vxrs2(self, byte_loc, vvr_offsets=[], vvr_start=[], vvr_end=[]):
self._f.seek(byte_loc, 0)
block_size = int.from_bytes(self._f.read(4), 'big', signed=True)
- vxrs = self._f.read(block_size-4)
+ vxrs = self._f.read(block_size - 4)
next_vxr_pos = int.from_bytes(vxrs[4:8], 'big', signed=True)
num_ent = int.from_bytes(vxrs[8:12], 'big', signed=True)
@@ -1654,12 +1653,12 @@ def _read_vxrs2(self, byte_loc, vvr_offsets=[], vvr_start=[], vvr_end=[]):
# coff = 16
for ix in range(0, num_ent_used):
soffset = 16 + 4 * ix
- num_start = int.from_bytes(vxrs[soffset:soffset+4], 'big',
+ num_start = int.from_bytes(vxrs[soffset:soffset + 4], 'big',
signed=True)
eoffset = 16 + 4 * num_ent + 4 * ix
- num_end = int.from_bytes(vxrs[eoffset:eoffset+4], 'big', signed=True)
+ num_end = int.from_bytes(vxrs[eoffset:eoffset + 4], 'big', signed=True)
ooffset = 16 + 2 * 4 * num_ent + 4 * ix
- rec_offset = int.from_bytes(vxrs[ooffset:ooffset+4], 'big',
+ rec_offset = int.from_bytes(vxrs[ooffset:ooffset + 4], 'big',
signed=True)
type_offset = 4 + rec_offset
self._f.seek(type_offset, 0)
@@ -1700,17 +1699,17 @@ def _read_vvrs(self, vdr_dict, vvr_offs, vvr_start, vvr_end, startrec, endrec, t
if (vvr_end[vvr_num] >= endrec):
lastBlock = vvr_num
break
- for vvr_num in range(firstBlock, (lastBlock+1)):
+ for vvr_num in range(firstBlock, (lastBlock + 1)):
if (self.cdfversion == 3):
var_block_data = self._read_vvr_block(vvr_offs[vvr_num])
else:
var_block_data = self._read_vvr_block2(vvr_offs[vvr_num])
asize = len(var_block_data)
- byte_stream[pos:pos+asize] = var_block_data
+ byte_stream[pos:pos + asize] = var_block_data
pos = pos + asize
startPos = (startrec - vvr_start[firstBlock]) * numBytes * numValues
stopOff = (vvr_end[lastBlock] - endrec) * numBytes * numValues
- byte_stream = byte_stream[startPos:len(byte_stream)-stopOff]
+ byte_stream = byte_stream[startPos:len(byte_stream) - stopOff]
else:
# with sparse records
if ('pad' in vdr_dict):
@@ -1726,7 +1725,7 @@ def _read_vvrs(self, vdr_dict, vvr_offs, vvr_start, vvr_end, startrec, endrec, t
vdr_dict['num_elements'])
cur_block = -1
rec_size = numBytes * numValues
- for rec_num in range(startrec, (endrec+1)):
+ for rec_num in range(startrec, (endrec + 1)):
block, prev_block = self._find_block(vvr_start, vvr_end,
cur_block, rec_num)
if (block > -1):
@@ -1738,12 +1737,12 @@ def _read_vvrs(self, vdr_dict, vvr_offs, vvr_start, vvr_end, startrec, endrec, t
var_block_data = self._read_vvr_block2(vvr_offs[block])
cur_block = block
xoff = record_off * rec_size
- byte_stream[pos:pos+rec_size] = var_block_data[xoff:
- xoff+rec_size]
+ byte_stream[pos:pos + rec_size] = var_block_data[xoff:
+ xoff + rec_size]
else:
if (vdr_dict['sparse'] == 1):
# use defined pad or default pad
- byte_stream[pos:pos+rec_size] = filled_data * numValues
+ byte_stream[pos:pos + rec_size] = filled_data * numValues
else:
# use previous physical record
if (prev_block != -1):
@@ -1755,9 +1754,9 @@ def _read_vvrs(self, vdr_dict, vvr_offs, vvr_start, vvr_end, startrec, endrec, t
vvr_offs[prev_block])
lastRecOff = (vvr_end[prev_block] -
vvr_start[prev_block]) * rec_size
- byte_stream[pos:pos+rec_size] = var_prev_block_data[lastRecOff:]
+ byte_stream[pos:pos + rec_size] = var_prev_block_data[lastRecOff:]
else:
- byte_stream[pos:pos+rec_size] = filled_data * numValues
+ byte_stream[pos:pos + rec_size] = filled_data * numValues
pos = pos + rec_size
if (block > -1):
cur_block = block
@@ -1775,7 +1774,7 @@ def _read_vvrs(self, vdr_dict, vvr_offs, vvr_start, vvr_end, startrec, endrec, t
else:
if (vdr_dict['data_type'] == 32):
y = self._convert_data(byte_stream, vdr_dict['data_type'],
- totalRecs, self._num_values(vdr_dict)*2,
+ totalRecs, self._num_values(vdr_dict) * 2,
vdr_dict['num_elements'])
else:
y = self._convert_data(byte_stream, vdr_dict['data_type'],
@@ -1888,7 +1887,7 @@ def _read_data(self, byte_stream, data_type, num_recs, num_elems, dimensions=Non
# for the numpy dtype. This requires us to squeeze
# the matrix later, to get rid of this extra dimension.
dt_string = self._convert_option()
- if dimensions != None:
+ if dimensions is not None:
if self._majority == 'Column_major':
dimensions = list(reversed(dimensions))
if (len(dimensions) == 1):
@@ -1905,15 +1904,15 @@ def _read_data(self, byte_stream, data_type, num_recs, num_elems, dimensions=Non
if data_type == 52 or data_type == 51:
# string
if dimensions is None:
- byte_data = bytearray(byte_stream[0:num_recs*num_elems])
+ byte_data = bytearray(byte_stream[0:num_recs * num_elems])
# In each record, check for the first '\x00' (null character).
# If found, make all the characters after it null as well.
for x in range(0, num_recs):
y = x * num_elems
- z = byte_data[y:y+num_elems].find(b'\x00')
+ z = byte_data[y:y + num_elems].find(b'\x00')
if (z > -1 and z < (num_elems - 1)):
byte_data[y + z + 1:y + num_elems] = b'\x00' * (num_elems - z - 1)
- ret = byte_data[0:num_recs*num_elems].decode(self.string_encoding, errors='ignore').replace('\x00', '')
+ ret = byte_data[0:num_recs * num_elems].decode(self.string_encoding, errors='ignore').replace('\x00', '')
else:
# Count total number of strings
count = 1
@@ -1921,16 +1920,16 @@ def _read_data(self, byte_stream, data_type, num_recs, num_elems, dimensions=Non
count = count * dimensions[x]
strings = []
if (len(dimensions) == 0):
- for i in range(0, num_recs*count*num_elems, num_elems):
- string1 = byte_stream[i:i+num_elems].decode(self.string_encoding, errors='ignore').\
+ for i in range(0, num_recs * count * num_elems, num_elems):
+ string1 = byte_stream[i:i + num_elems].decode(self.string_encoding, errors='ignore').\
replace('\x00', '')
strings.append(string1)
else:
for x in range(0, num_recs):
onerec = []
- for i in range(x*count*num_elems, (x+1)*count*num_elems,
+ for i in range(x * count * num_elems, (x + 1) * count * num_elems,
num_elems):
- string1 = byte_stream[i:i+num_elems].decode(self.string_encoding, errors='ignore')\
+ string1 = byte_stream[i:i + num_elems].decode(self.string_encoding, errors='ignore')\
.replace('\x00', '')
onerec.append(string1)
strings.append(onerec)
@@ -1962,7 +1961,7 @@ def _read_data(self, byte_stream, data_type, num_recs, num_elems, dimensions=Non
elif (data_type == 32):
dt_string += 'c16'
dt = np.dtype(dt_string)
- ret = np.frombuffer(byte_stream, dtype=dt, count=num_recs*num_elems)
+ ret = np.frombuffer(byte_stream, dtype=dt, count=num_recs * num_elems)
try:
ret.setflags('WRITEABLE')
except ValueError:
@@ -1970,7 +1969,7 @@ def _read_data(self, byte_stream, data_type, num_recs, num_elems, dimensions=Non
pass
if squeeze_needed:
- ret = np.squeeze(ret, axis=(ret.ndim-1))
+ ret = np.squeeze(ret, axis=(ret.ndim - 1))
if dimensions is not None:
dimensions.pop()
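
The hunk above keeps the setflags guard around np.frombuffer. frombuffer returns a view over the source buffer, and when that buffer is immutable (such as bytes) the array cannot be made writable, which is why the except ValueError branch exists; copying is the usual escape hatch. A small demonstration:

    import numpy as np

    buf = bytes(8)                        # immutable source buffer
    ro = np.frombuffer(buf, dtype='>i4')
    print(ro.flags.writeable)             # False
    try:
        ro.setflags(write=True)           # fails: the underlying buffer is read-only
    except ValueError as err:
        print(err)
    rw = ro.copy()                        # a copy owns its data and is writable
    rw[0] = 42
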
@@ -2054,7 +2053,7 @@ def _read_vardata(self, vdr_info, epoch=None, starttime=None, endtime=None,
if (vdr_info['record_vary']):
# Record varying
- if (starttime != None or endtime != None):
+ if (starttime is not None or endtime is not None):
recs = self._findtimerecords(vdr_info['name'], starttime,
endtime, epoch=epoch)
if recs is None:
@@ -2098,9 +2097,9 @@ def _read_vardata(self, vdr_info, epoch=None, starttime=None, endtime=None,
def _findtimerecords(self, var_name, starttime, endtime, epoch=None):
- if (epoch != None):
+ if (epoch is not None):
vdr_info = self.varinq(epoch)
- if (vdr_info == None):
+ if (vdr_info is None):
raise ValueError('Epoch not found')
if (vdr_info['Data_Type'] == 31 or vdr_info['Data_Type'] == 32 or
vdr_info['Data_Type'] == 33):
@@ -2174,37 +2173,37 @@ def _default_pad(self, data_type, num_elms):
"""
order = self._convert_option()
if (data_type == 51 or data_type == 52):
- return str(' '*num_elms)
+ return str(' ' * num_elms)
if (data_type == 1) or (data_type == 41):
- pad_value = struct.pack(order+'b', -127)
+ pad_value = struct.pack(order + 'b', -127)
dt_string = 'i1'
elif data_type == 2:
- pad_value = struct.pack(order+'h', -32767)
+ pad_value = struct.pack(order + 'h', -32767)
dt_string = 'i2'
elif data_type == 4:
- pad_value = struct.pack(order+'i', -2147483647)
+ pad_value = struct.pack(order + 'i', -2147483647)
dt_string = 'i4'
elif (data_type == 8) or (data_type == 33):
- pad_value = struct.pack(order+'q', -9223372036854775807)
+ pad_value = struct.pack(order + 'q', -9223372036854775807)
dt_string = 'i8'
elif data_type == 11:
- pad_value = struct.pack(order+'B', 254)
+ pad_value = struct.pack(order + 'B', 254)
dt_string = 'u1'
elif data_type == 12:
- pad_value = struct.pack(order+'H', 65534)
+ pad_value = struct.pack(order + 'H', 65534)
dt_string = 'u2'
elif data_type == 14:
- pad_value = struct.pack(order+'I', 4294967294)
+ pad_value = struct.pack(order + 'I', 4294967294)
dt_string = 'u4'
elif (data_type == 21) or (data_type == 44):
- pad_value = struct.pack(order+'f', -1.0E30)
+ pad_value = struct.pack(order + 'f', -1.0E30)
dt_string = 'f'
elif (data_type == 22) or (data_type == 45) or (data_type == 31):
- pad_value = struct.pack(order+'d', -1.0E30)
+ pad_value = struct.pack(order + 'd', -1.0E30)
dt_string = 'd'
else:
# (data_type == 32):
- pad_value = struct.pack(order+'2d', *[-1.0E30, -1.0E30])
+ pad_value = struct.pack(order + '2d', *[-1.0E30, -1.0E30])
dt_string = 'c16'
dt = np.dtype(dt_string)
@@ -2212,7 +2211,7 @@ def _default_pad(self, data_type, num_elms):
try:
ret.setflags('WRITEABLE')
except Exception:
- #TODO: Figure out why we need to array set to writeable
+            # TODO: Figure out why we need to set the array to writeable
pass
return ret
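
_default_pad builds one pad value per CDF data type by packing an integer or float with an endianness prefix from _convert_option(). The packing itself is plain struct; a sketch with a fixed big-endian order standing in for the runtime choice:

    import struct
    import numpy as np

    order = '>'                                    # big-endian, standing in for _convert_option()
    pad_i2 = struct.pack(order + 'h', -32767)      # CDF_INT2 pad, as in the hunk above
    pad_f8 = struct.pack(order + 'd', -1.0e30)     # CDF_REAL8 / CDF_DOUBLE pad
    print(np.frombuffer(pad_i2, dtype='>i2')[0])   # -32767 round-trips
    print(np.frombuffer(pad_f8, dtype='>f8')[0])   # -1e+30
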
@@ -2222,7 +2221,7 @@ def _convert_np_data(self, data, data_type, num_elems):
"""
if (data_type == 51 or data_type == 52):
if (data == ''):
- return ('\x00'*num_elems).encode()
+ return ('\x00' * num_elems).encode()
else:
return data.ljust(num_elems, '\x00').encode(self.string_encoding)
elif (data_type == 32):
@@ -2238,13 +2237,13 @@ def _read_vvr_block(self, offset):
"""
self._f.seek(offset, 0)
block_size = int.from_bytes(self._f.read(8), 'big')
- block = self._f.read(block_size-8)
+ block = self._f.read(block_size - 8)
section_type = int.from_bytes(block[0:4], 'big')
if section_type == 13:
# a CVVR
compressed_size = int.from_bytes(block[8:16], 'big')
- return gzip.decompress(block[16:16+compressed_size])
+ return gzip.decompress(block[16:16 + compressed_size])
elif section_type == 7:
# a VVR
return block[4:]
@@ -2255,13 +2254,13 @@ def _read_vvr_block2(self, offset):
"""
self._f.seek(offset, 0)
block_size = int.from_bytes(self._f.read(4), 'big')
- block = self._f.read(block_size-4)
+ block = self._f.read(block_size - 4)
section_type = int.from_bytes(block[0:4], 'big')
if section_type == 13:
# a CVVR
compressed_size = int.from_bytes(block[8:12], 'big')
- return gzip.decompress(block[12:12+compressed_size])
+ return gzip.decompress(block[12:12 + compressed_size])
elif section_type == 7:
# a VVR
return block[4:]
@@ -2282,7 +2281,7 @@ def _find_block(starts, ends, cur_block, rec_num):
return x, x
if (starts[x] > rec_num):
break
- return -1, x-1
+ return -1, x - 1
def _convert_data(self, data, data_type, num_recs, num_values, num_elems):
"""
@@ -2291,15 +2290,15 @@ def _convert_data(self, data, data_type, num_recs, num_values, num_elems):
"""
if (data_type == 51 or data_type == 52):
- return [data[i:i+num_elems].decode(self.string_encoding) for i in
- range(0, num_recs*num_values*num_elems, num_elems)]
+ return [data[i:i + num_elems].decode(self.string_encoding) for i in
+ range(0, num_recs * num_values * num_elems, num_elems)]
else:
tofrom = self._convert_option()
dt_string = self._convert_type(data_type)
form = tofrom + str(num_recs * num_values * num_elems) + dt_string
value_len = self._type_size(data_type, num_elems)
return list(struct.unpack_from(form,
- data[0:num_recs*num_values*value_len]))
+ data[0:num_recs * num_values * value_len]))
@staticmethod
def getVersion():
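
Nearly every cdfread hunk touches the same idiom: seek to a byte offset inside a CDF internal record and decode big-endian integers with int.from_bytes, signed for fields that may hold -1. A self-contained sketch of that record-walking style; the field layout below is invented for illustration and is not the real CDF layout:

    import io

    # Hypothetical record: 8-byte block size, 4-byte section type, 8-byte signed "next" offset.
    record = (20).to_bytes(8, 'big') + (7).to_bytes(4, 'big') + (-1).to_bytes(8, 'big', signed=True)
    f = io.BytesIO(record)

    block_size = int.from_bytes(f.read(8), 'big')                # 20
    section_type = int.from_bytes(f.read(4), 'big')              # 7 is a VVR in the hunks above
    next_offset = int.from_bytes(f.read(8), 'big', signed=True)  # -1 marks "no next record"
    print(block_size, section_type, next_offset)
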
diff --git a/cdflib/cdfwrite.py b/cdflib/cdfwrite.py
index 959bee4..425b006 100644
--- a/cdflib/cdfwrite.py
+++ b/cdflib/cdfwrite.py
@@ -32,6 +32,7 @@ def ensure_open(self, *args, **kwargs):
return ensure_open
+
class CDF:
"""
Creates an empty CDF file.
@@ -290,7 +291,7 @@ def close(self):
with self.path.open('rb+') as f:
f.seek(0, 2)
eof = f.tell()
- self._update_offset_value(f, self.gdr_head+36, 8, eof)
+ self._update_offset_value(f, self.gdr_head + 36, 8, eof)
if self.checksum:
f.write(self._md5_compute(f))
self.is_closed = True
@@ -299,7 +300,7 @@ def close(self):
with self.path.open('rb+') as f:
f.seek(0, 2)
eof = f.tell()
- self._update_offset_value(f, self.gdr_head+36, 8, eof)
+ self._update_offset_value(f, self.gdr_head + 36, 8, eof)
with self.compressed_file.open('wb+') as g:
g.write(bytearray.fromhex(self.V3magicNUMBER_1))
@@ -442,17 +443,17 @@ def write_globalattrs(self, globalAttrs):
dataType, numElems, None)
if (entries == 0):
# ADR's AgrEDRhead
- self._update_offset_value(f, offsetADR+20, 8, offset)
+ self._update_offset_value(f, offsetADR + 20, 8, offset)
else:
# ADR's ADRnext
- self._update_offset_value(f, poffset+12, 8, offset)
+ self._update_offset_value(f, poffset + 12, 8, offset)
poffset = offset
entries = entries + 1
# ADR's NgrEntries
- self._update_offset_value(f, offsetADR+36, 4, entries)
+ self._update_offset_value(f, offsetADR + 36, 4, entries)
# ADR's MAXgrEntry
- self._update_offset_value(f, offsetADR+40, 4, entryNumMaX)
+ self._update_offset_value(f, offsetADR + 40, 4, entryNumMaX)
@is_open
def write_variableattrs(self, variableAttrs):
@@ -603,25 +604,25 @@ def write_variableattrs(self, variableAttrs):
if (entries == 0):
if zVar:
# ADR's AzEDRhead
- self._update_offset_value(f, offsetA+48, 8, offset)
+ self._update_offset_value(f, offsetA + 48, 8, offset)
else:
# ADR's AgrEDRhead
- self._update_offset_value(f, offsetA+20, 8, offset)
+ self._update_offset_value(f, offsetA + 20, 8, offset)
else:
# ADR's ADRnext
- self._update_offset_value(f, poffset+12, 8, offset)
+ self._update_offset_value(f, poffset + 12, 8, offset)
poffset = offset
entries = entries + 1
if zVar:
# ADR's NzEntries
- self._update_offset_value(f, offsetA+56, 4, entries)
+ self._update_offset_value(f, offsetA + 56, 4, entries)
# ADR's MAXzEntry
- self._update_offset_value(f, offsetA+60, 4, entryNumX)
+ self._update_offset_value(f, offsetA + 60, 4, entryNumX)
else:
# ADR's NgrEntries
- self._update_offset_value(f, offsetA+36, 4, entries)
+ self._update_offset_value(f, offsetA + 36, 4, entries)
# ADR's MAXgrEntry
- self._update_offset_value(f, offsetA+40, 4, entryNumX)
+ self._update_offset_value(f, offsetA + 40, 4, entryNumX)
@is_open
def write_var(self, var_spec, var_attrs=None, var_data=None):
@@ -793,11 +794,11 @@ def write_var(self, var_spec, var_attrs=None, var_data=None):
if zVar:
if len(self.zvars) == 1:
# GDR's zVDRhead
- self._update_offset_value(f, self.gdr_head+20, 8, offset)
+ self._update_offset_value(f, self.gdr_head + 20, 8, offset)
else:
if len(self.rvars) == 1:
# GDR's rVDRhead
- self._update_offset_value(f, self.gdr_head+12, 8, offset)
+ self._update_offset_value(f, self.gdr_head + 12, 8, offset)
# Write the variable attributes
if var_attrs is not None:
@@ -834,10 +835,10 @@ def write_var(self, var_spec, var_attrs=None, var_data=None):
# Update GDR MaxRec if writing an r variable
if not zVar:
# GDR's rMaxRec
- f.seek(self.gdr_head+52)
+ f.seek(self.gdr_head + 52)
maxRec = int.from_bytes(f.read(4), 'big', signed=True)
if (maxRec < varMaxRec):
- self._update_offset_value(f, self.gdr_head+52, 4, varMaxRec)
+ self._update_offset_value(f, self.gdr_head + 52, 4, varMaxRec)
def _write_var_attrs(self, f, varNum, var_attrs, zVar):
'''
@@ -868,7 +869,7 @@ def _write_var_attrs(self, f, varNum, var_attrs, zVar):
attrNum, offset = self._write_adr(f, False, attr)
if (len(self.attrs) == 0):
# GDR's ADRhead
- self._update_offset_value(self.grd_offset+28, 8, offset)
+ self._update_offset_value(self.grd_offset + 28, 8, offset)
else:
attrNum = self.attrs.index(attr)
offset = self.attrsinfo[attrNum][2]
@@ -1006,7 +1007,7 @@ def _write_var_data_nonsparse(self, f, zVar: bool, var, dataType, numElems,
usedEntries = 0
numVXRs = 0
if compression > 0:
- default_blockingfactor = math.ceil(self.BLOCKING_BYTES/(numValues * dataTypeSize))
+ default_blockingfactor = math.ceil(self.BLOCKING_BYTES / (numValues * dataTypeSize))
# If the given blocking factor is too small, use the default one
# Will re-adjust if the records are less than this computed BF.
if (blockingfactor < default_blockingfactor):
@@ -1032,7 +1033,7 @@ def _write_var_data_nonsparse(self, f, zVar: bool, var, dataType, numElems,
startrec = x * blockingfactor
startloc = startrec * numValues * dataTypeSize
endrec = (x + 1) * blockingfactor - 1
- if (endrec > (recs-1)):
+ if (endrec > (recs - 1)):
endrec = recs - 1
endloc = (endrec + 1) * numValues * dataTypeSize
if (endloc > len(data)):
@@ -1064,28 +1065,28 @@ def _write_var_data_nonsparse(self, f, zVar: bool, var, dataType, numElems,
usedEntries = self._use_vxrentry(f, VXRoffset, startrec,
endrec, n1offset)
# Edit the VXRnext field of the previous VXR
- self._update_offset_value(f, savedVXRoffset+12, 8, VXRoffset)
+ self._update_offset_value(f, savedVXRoffset + 12, 8, VXRoffset)
# Edit the VXRtail of the VDR
- self._update_offset_value(f, vdr_offset+36, 8, VXRoffset)
+ self._update_offset_value(f, vdr_offset + 36, 8, VXRoffset)
# After we're done with the blocks, check the way
# we have VXRs set up
if (numVXRs > self.NUM_VXRlvl_ENTRIES):
newvxrhead, newvxrtail = self._add_vxr_levels_r(f, VXRhead,
numVXRs)
- self._update_offset_value(f, vdr_offset+28, 8, newvxrhead)
- self._update_offset_value(f, vdr_offset+36, 8, newvxrtail)
+ self._update_offset_value(f, vdr_offset + 28, 8, newvxrhead)
+ self._update_offset_value(f, vdr_offset + 36, 8, newvxrtail)
else:
# Create one VVR and VXR, with one VXR entry
offset = self._write_vvr(f, data)
VXRoffset = self._write_vxr(f)
- usedEntries = self._use_vxrentry(f, VXRoffset, 0, recs-1, offset)
+ usedEntries = self._use_vxrentry(f, VXRoffset, 0, recs - 1, offset)
self._update_vdr_vxrheadtail(f, vdr_offset, VXRoffset)
# VDR's MaxRec
- self._update_offset_value(f, vdr_offset+24, 4, recs-1)
+ self._update_offset_value(f, vdr_offset + 24, 4, recs - 1)
- return (recs-1)
+ return (recs - 1)
def _write_var_data_sparse(self, f, zVar: bool, var, dataType, numElems, recVary,
oneblock):
@@ -1129,7 +1130,7 @@ def _write_var_data_sparse(self, f, zVar: bool, var, dataType, numElems, recVary
# Write one VVR
offset = self._write_vvr(f, data)
- f.seek(vdr_offset+28, 0)
+ f.seek(vdr_offset + 28, 0)
# Get first VXR
vxrOne = int.from_bytes(f.read(8), 'big', signed=True)
@@ -1142,7 +1143,7 @@ def _write_var_data_sparse(self, f, zVar: bool, var, dataType, numElems, recVary
# have a VXR
f.seek(vxrOne, 0)
currentVXR = f.tell()
- f.seek(vxrOne+12, 0)
+ f.seek(vxrOne + 12, 0)
vxrNext = int.from_bytes(f.read(8), 'big', signed=True)
nEntries = int.from_bytes(f.read(4), 'big', signed=True)
usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
@@ -1162,10 +1163,10 @@ def _write_var_data_sparse(self, f, zVar: bool, var, dataType, numElems, recVary
self._use_vxrentry(f, currentVXR, rec_start, rec_end, offset)
# Modify the VDR's MaxRec if needed
- f.seek(vdr_offset+24, 0)
+ f.seek(vdr_offset + 24, 0)
recNumc = int.from_bytes(f.read(4), 'big', signed=True)
if (rec_end > recNumc):
- self._update_offset_value(f, vdr_offset+24, 4, rec_end)
+ self._update_offset_value(f, vdr_offset + 24, 4, rec_end)
return rec_end
@@ -1197,12 +1198,12 @@ def _create_vxr(self, f, recStart, recEnd, currentVDR, priorVXR, vvrOffset):
self._use_vxrentry(f, vxroffset, recStart, recEnd, vvrOffset)
if (priorVXR == 0):
# VDR's VXRhead
- self._update_offset_value(f, currentVDR+28, 8, vxroffset)
+ self._update_offset_value(f, currentVDR + 28, 8, vxroffset)
else:
# VXR's next
- self._update_offset_value(f, priorVXR+12, 8, vxroffset)
+ self._update_offset_value(f, priorVXR + 12, 8, vxroffset)
# VDR's VXRtail
- self._update_offset_value(f, currentVDR+36, 8, vxroffset)
+ self._update_offset_value(f, currentVDR + 36, 8, vxroffset)
return vxroffset
def _use_vxrentry(self, f, VXRoffset, recStart, recEnd, offset):
@@ -1210,22 +1211,22 @@ def _use_vxrentry(self, f, VXRoffset, recStart, recEnd, offset):
Adds a VVR pointer to a VXR
'''
# Select the next unused entry in a VXR for a VVR/CVVR
- f.seek(VXRoffset+20)
+ f.seek(VXRoffset + 20)
# num entries
numEntries = int.from_bytes(f.read(4), 'big', signed=True)
# used entries
usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
# VXR's First
- self._update_offset_value(f, VXRoffset+28+4*usedEntries, 4, recStart)
+ self._update_offset_value(f, VXRoffset + 28 + 4 * usedEntries, 4, recStart)
# VXR's Last
- self._update_offset_value(f, VXRoffset+28+4*numEntries+4*usedEntries,
+ self._update_offset_value(f, VXRoffset + 28 + 4 * numEntries + 4 * usedEntries,
4, recEnd)
# VXR's Offset
- self._update_offset_value(f, VXRoffset+28+2*4*numEntries+8*usedEntries,
+ self._update_offset_value(f, VXRoffset + 28 + 2 * 4 * numEntries + 8 * usedEntries,
8, offset)
# VXR's NusedEntries
usedEntries += 1
- self._update_offset_value(f, VXRoffset+24, 4, usedEntries)
+ self._update_offset_value(f, VXRoffset + 24, 4, usedEntries)
return usedEntries
def _add_vxr_levels_r(self, f, vxrhead, numVXRs):
@@ -1269,7 +1270,7 @@ def _add_vxr_levels_r(self, f, vxrhead, numVXRs):
for x in range(0, newNumVXRs):
newvxroff = self._write_vxr(f, numEntries=self.NUM_VXRlvl_ENTRIES)
if (x > 0):
- self._update_offset_value(f, prevxroff+12, 8, newvxroff)
+ self._update_offset_value(f, prevxroff + 12, 8, newvxroff)
else:
newvxrhead = newvxroff
prevxroff = newvxroff
@@ -1283,13 +1284,13 @@ def _add_vxr_levels_r(self, f, vxrhead, numVXRs):
for _ in range(0, endEntry):
recFirst, recLast = self._get_recrange(f, vxroff)
self._use_vxrentry(f, newvxroff, recFirst, recLast, vxroff)
- vxroff = self._read_offset_value(f, vxroff+12, 8)
+ vxroff = self._read_offset_value(f, vxroff + 12, 8)
vxroff = vxrhead
# Break the horizontal links
for x in range(0, numVXRs):
- nvxroff = self._read_offset_value(f, vxroff+12, 8)
- self._update_offset_value(f, vxroff+12, 8, 0)
+ nvxroff = self._read_offset_value(f, vxroff + 12, 8)
+ self._update_offset_value(f, vxroff + 12, 8, 0)
vxroff = nvxroff
# Iterate this process if we're over NUM_VXRlvl_ENTRIES
@@ -1303,16 +1304,16 @@ def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset):
This sets a VXR to be the first and last VXR in the VDR
'''
# VDR's VXRhead
- self._update_offset_value(f, vdr_offset+28, 8, VXRoffset)
+ self._update_offset_value(f, vdr_offset + 28, 8, VXRoffset)
# VDR's VXRtail
- self._update_offset_value(f, vdr_offset+36, 8, VXRoffset)
+ self._update_offset_value(f, vdr_offset + 36, 8, VXRoffset)
def _get_recrange(self, f, VXRoffset):
'''
        Finds the first and last record numbers pointed to by the VXR
Assumes the VXRs are in order
'''
- f.seek(VXRoffset+20)
+ f.seek(VXRoffset + 20)
# Num entries
numEntries = int.from_bytes(f.read(4), 'big', signed=True)
# used entries
@@ -1320,7 +1321,7 @@ def _get_recrange(self, f, VXRoffset):
# VXR's First record
firstRec = int.from_bytes(f.read(4), 'big', signed=True)
# VXR's Last record
- f.seek(VXRoffset+28+(4*numEntries+4*(usedEntries-1)))
+ f.seek(VXRoffset + 28 + (4 * numEntries + 4 * (usedEntries - 1)))
lastRec = int.from_bytes(f.read(4), 'big', signed=True)
return firstRec, lastRec
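
The cdfwrite hunks call self._update_offset_value(f, offset, size, value) throughout; its body is not part of this diff, but the call sites imply "overwrite a size-byte big-endian integer in place at offset". A sketch under that assumption; the helper below is a guess at the behavior, not cdflib's actual implementation:

    import io

    def update_offset_value(f, offset, size, value):
        # Assumed behavior: overwrite size bytes at offset with a big-endian integer.
        f.seek(offset, 0)
        f.write(int(value).to_bytes(size, 'big', signed=True))

    f = io.BytesIO(bytes(16))
    update_offset_value(f, 8, 8, 12345)  # e.g. patching a GDR field, as the hunks above do
    print(int.from_bytes(f.getvalue()[8:16], 'big', signed=True))  # 12345
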
@@ -1539,7 +1540,7 @@ def _write_cdr(self, f, major, encoding, checksum) -> int:
cdr[48:52] = struct.pack('>i', identifier)
cdr[52:56] = struct.pack('>i', rfuE)
tofill = self.CDF_COPYRIGHT_LEN - len(copy_right)
- cdr[56:block_size] = (copy_right+'\0'*tofill).encode()
+ cdr[56:block_size] = (copy_right + '\0' * tofill).encode()
f.write(cdr)
return byte_loc
@@ -1581,7 +1582,7 @@ def _write_gdr(self, f) -> int:
gdr[80:84] = struct.pack('>i', rfuE)
if (num_rdim > 0):
for i in range(0, num_rdim):
- gdr[84+i*4:84+(i+1)*4] = struct.pack('>i', self.rdim_sizes[i])
+ gdr[84 + i * 4:84 + (i + 1) * 4] = struct.pack('>i', self.rdim_sizes[i])
f.write(gdr)
return byte_loc
@@ -1641,7 +1642,7 @@ def _write_adr(self, f, gORv, name) -> Tuple[int, int]:
adr[60:64] = struct.pack('>i', maxzEntry)
adr[64:68] = struct.pack('>i', rfuE)
tofill = 256 - len(name)
- adr[68:324] = (name+'\0'*tofill).encode()
+ adr[68:324] = (name + '\0' * tofill).encode()
f.write(adr)
info = []
info.append(name)
@@ -1656,14 +1657,14 @@ def _write_adr(self, f, gORv, name) -> Tuple[int, int]:
self.attrs.append(name)
if (num > 0):
# ADR's ADRnext
- self._update_offset_value(f, self.attrsinfo[num-1][2]+12, 8,
+ self._update_offset_value(f, self.attrsinfo[num - 1][2] + 12, 8,
byte_loc)
else:
# GDR's ADRhead
- self._update_offset_value(f, self.gdr_head+28, 8, byte_loc)
+ self._update_offset_value(f, self.gdr_head + 28, 8, byte_loc)
# GDR's NumAttr
- self._update_offset_value(f, self.gdr_head+48, 4, num+1)
+ self._update_offset_value(f, self.gdr_head + 48, 4, num + 1)
return num, byte_loc
@@ -1696,7 +1697,7 @@ def _write_aedr(self, f, gORz, attrNum, entryNum, value, pdataType,
'''
f.seek(0, 2)
byte_loc = f.tell()
- if (gORz == True or zVar != True):
+        if (gORz or not zVar):
section_type = self.AgrEDR_
else:
section_type = self.AzEDR_
@@ -1704,7 +1705,7 @@ def _write_aedr(self, f, gORz, attrNum, entryNum, value, pdataType,
if pdataType is None:
# Figure out Data Type if not supplied
- if hasattr(value, '__len__') and not isinstance(value, str):
+ if hasattr(value, '__len__') and not isinstance(value, str):
avalue = value[0]
else:
avalue = value
@@ -1859,7 +1860,7 @@ def _write_vdr(self, f, cdataType, numElems, numDims, dimSizes, name,
if (dataType == 51 or dataType == 52):
# pad needs to be the correct number of elements
if (len(pad) < numElems):
- pad += '\0'*(numElems-len(pad))
+ pad += '\0' * (numElems - len(pad))
elif (len(pad) > numElems):
pad = pad[:numElems]
pad = pad.encode()
@@ -1891,23 +1892,23 @@ def _write_vdr(self, f, cdataType, numElems, numDims, dimSizes, name,
vdr[72:80] = struct.pack('>q', offsetCPRorSPR)
vdr[80:84] = struct.pack('>i', blockingFactor)
tofill = 256 - len(name)
- vdr[84:340] = (name+'\0'*tofill).encode()
+ vdr[84:340] = (name + '\0' * tofill).encode()
if zVar:
vdr[340:344] = struct.pack('>i', numDims)
if (numDims > 0):
for i in range(0, numDims):
- vdr[344+i*4:344+(i+1)*4] = struct.pack('>i', dimSizes[i])
- ist = 344+numDims*4
+ vdr[344 + i * 4:344 + (i + 1) * 4] = struct.pack('>i', dimSizes[i])
+ ist = 344 + numDims * 4
for i in range(0, numDims):
- vdr[ist+i*4:ist+(i+1)*4] = struct.pack('>i', self.VARY)
+ vdr[ist + i * 4:ist + (i + 1) * 4] = struct.pack('>i', self.VARY)
ist = 344 + 8 * numDims
else:
if (numDims > 0):
for i in range(0, numDims):
- if (dimVary[i] == True or dimVary[i] != 0):
- vdr[340+i*4:344+i*4] = struct.pack('>i', self.VARY)
+ if (dimVary[i] or dimVary[i] != 0):
+ vdr[340 + i * 4:344 + i * 4] = struct.pack('>i', self.VARY)
else:
- vdr[340+i*4:344+i*4] = struct.pack('>i', self.NOVARY)
+ vdr[340 + i * 4:344 + i * 4] = struct.pack('>i', self.NOVARY)
ist = 340 + 4 * numDims
vdr[ist:block_size] = pad
f.write(vdr)
@@ -1930,19 +1931,19 @@ def _write_vdr(self, f, cdataType, numElems, numDims, dimSizes, name,
self.zvars.append(name)
if (num > 0):
# VDR's VDRnext
- self._update_offset_value(f, self.zvarsinfo[num-1][1]+12, 8,
+ self._update_offset_value(f, self.zvarsinfo[num - 1][1] + 12, 8,
byte_loc)
# GDR's NzVars
- self._update_offset_value(f, self.gdr_head+60, 4, num+1)
+ self._update_offset_value(f, self.gdr_head + 60, 4, num + 1)
else:
self.rvarsinfo[num] = info
self.rvars.append(name)
if (num > 0):
# VDR's VDRnext
- self._update_offset_value(f, self.rvarsinfo[num-1][1]+12, 8,
+ self._update_offset_value(f, self.rvarsinfo[num - 1][1] + 12, 8,
byte_loc)
# GDR's NrVars
- self._update_offset_value(f, self.gdr_head+44, 4, num+1)
+ self._update_offset_value(f, self.gdr_head + 44, 4, num + 1)
return num, byte_loc
@@ -1957,7 +1958,7 @@ def _write_vxr(self, f, numEntries=None):
byte_loc = f.tell()
section_type = self.VXR_
nextVXR = 0
- if (numEntries == None):
+ if (numEntries is None):
nEntries = self.NUM_VXR_ENTRIES
else:
nEntries = int(numEntries)
@@ -1973,9 +1974,9 @@ def _write_vxr(self, f, numEntries=None):
vxr[12:20] = struct.pack('>q', nextVXR)
vxr[20:24] = struct.pack('>i', nEntries)
vxr[24:28] = struct.pack('>i', nUsedEntries)
- estart = 28 + 4*nEntries
+ estart = 28 + 4 * nEntries
vxr[28:estart] = struct.pack('>%si' % nEntries, *firsts)
- eend = estart + 4*nEntries
+ eend = estart + 4 * nEntries
vxr[estart:eend] = struct.pack('>%si' % nEntries, *lasts)
vxr[eend:block_size] = struct.pack('>%sq' % nEntries, *offsets)
f.write(vxr)
@@ -2160,9 +2161,9 @@ def _convert_nptype(data_type, data):
return np.float64(data).tobytes()
elif (data_type == 32):
return np.complex128(data).tobytes()
- elif ((data_type) == 51) or ((data_type)==52):
+ elif (data_type == 51) or (data_type == 52):
utf8_bytes = np.asarray(data).astype('U').tobytes()
- return utf8_bytes.decode().replace('\x00', '').encode('ASCII')
+ return utf8_bytes.decode().replace('\x00', '').encode('ASCII')
else:
return data
@@ -2172,31 +2173,31 @@ def _default_pad(self, data_type, numElems):
'''
order = self._convert_option()
if (data_type == 1) or (data_type == 41):
- pad_value = struct.pack(order+'b', -127)
+ pad_value = struct.pack(order + 'b', -127)
elif data_type == 2:
- pad_value = struct.pack(order+'h', -32767)
+ pad_value = struct.pack(order + 'h', -32767)
elif data_type == 4:
- pad_value = struct.pack(order+'i', -2147483647)
+ pad_value = struct.pack(order + 'i', -2147483647)
elif (data_type == 8) or (data_type == 33):
- pad_value = struct.pack(order+'q', -9223372036854775807)
+ pad_value = struct.pack(order + 'q', -9223372036854775807)
elif data_type == 11:
- pad_value = struct.pack(order+'B', 254)
+ pad_value = struct.pack(order + 'B', 254)
elif data_type == 12:
- pad_value = struct.pack(order+'H', 65534)
+ pad_value = struct.pack(order + 'H', 65534)
elif data_type == 14:
- pad_value = struct.pack(order+'I', 4294967294)
+ pad_value = struct.pack(order + 'I', 4294967294)
elif (data_type == 21) or (data_type == 44):
- pad_value = struct.pack(order+'f', -1.0E30)
+ pad_value = struct.pack(order + 'f', -1.0E30)
elif (data_type == 22) or (data_type == 45):
- pad_value = struct.pack(order+'d', -1.0E30)
+ pad_value = struct.pack(order + 'd', -1.0E30)
elif (data_type == 31):
- pad_value = struct.pack(order+'d', 0.0)
+ pad_value = struct.pack(order + 'd', 0.0)
elif (data_type == 32):
- pad_value = struct.pack(order+'2d', *[0.0, 0.0])
+ pad_value = struct.pack(order + '2d', *[0.0, 0.0])
elif (data_type == 51) or (data_type == 52):
- tmpPad = str(' '*numElems).encode()
+ tmpPad = str(' ' * numElems).encode()
form = str(numElems)
- pad_value = struct.pack(form+'b', *tmpPad)
+ pad_value = struct.pack(form + 'b', *tmpPad)
return pad_value
def _convert_data(self, data_type, num_elems, num_values, indata):
@@ -2239,12 +2240,12 @@ def _convert_data(self, data_type, num_elems, num_values, indata):
else:
size2 = 1
odata += adata.ljust(num_elems, '\x00')
- recs = int((size*size2)/num_values)
+ recs = int((size * size2) / num_values)
return recs, odata.encode()
else:
tofrom = self._convert_option()
dt_string = self._convert_type(data_type)
- recs = int(size/num_values)
+ recs = int(size / num_values)
if (data_type == self.CDF_EPOCH16 and
isinstance(indata[0], complex)):
complex_data = []
@@ -2256,7 +2257,7 @@ def _convert_data(self, data_type, num_elems, num_values, indata):
indata = complex_data
if (data_type == self.CDF_EPOCH16 and
not isinstance(indata[0], complex)):
- recs = int(recs/2)
+ recs = int(recs / 2)
form = tofrom + str(size) + dt_string
return recs, struct.pack(form, *indata)
elif (isinstance(indata, bytes)):
@@ -2299,22 +2300,22 @@ def _convert_data(self, data_type, num_elems, num_values, indata):
adata = ''
size2 = 1
odata += str(adata).ljust(num_elems, '\x00')
- recs = int((size * size2)/num_values)
+ recs = int((size * size2) / num_values)
return recs, odata.encode()
else:
tofrom = self._convert_option()
npdata = self._convert_nptype(data_type, indata)
if indata.size == 0: # Check if the data being read in is zero size
recs = 0
- elif indata.size == num_values*num_elems: # Check if only one record is being read in
+ elif indata.size == num_values * num_elems: # Check if only one record is being read in
recs = 1
else:
recs = len(indata)
dt_string = self._convert_type(data_type)
if (data_type == self.CDF_EPOCH16):
num_elems = 2 * num_elems
- form = str(recs*num_values*num_elems) + dt_string
- form2 = tofrom + str(recs*num_values*num_elems) + dt_string
+ form = str(recs * num_values * num_elems) + dt_string
+ form2 = tofrom + str(recs * num_values * num_elems) + dt_string
datau = struct.unpack(form, npdata)
return recs, struct.pack(form2, *datau)
elif (isinstance(indata, str)):
@@ -2339,8 +2340,8 @@ def _convert_data(self, data_type, num_elems, num_values, indata):
complex_data.append(indata.real)
complex_data.append(indata.imag)
indata = complex_data
- form = tofrom + str(recs*num_values*num_elems) + dt_string
- if (recs*num_values*num_elems > 1):
+ form = tofrom + str(recs * num_values * num_elems) + dt_string
+ if (recs * num_values * num_elems > 1):
return recs, struct.pack(form, *indata)
else:
return recs, struct.pack(form, indata)
@@ -2363,7 +2364,7 @@ def _num_values(self, zVar: bool, varNum):
return values
else:
for x in range(0, numDims):
- if (zVar == True):
+ if (zVar):
values = values * dimSizes[x]
else:
if (dimVary[x] != 0):
@@ -2414,13 +2415,13 @@ def _update_aedr_link(self, f, attrNum, zVar, varNum, offset):
# Get the number of entries
if zVar:
- f.seek(adr_offset+56, 0)
+ f.seek(adr_offset + 56, 0)
# ADR's NzEntries
entries = int.from_bytes(f.read(4), 'big', signed=True)
# ADR's MAXzEntry
maxEntry = int.from_bytes(f.read(4), 'big', signed=True)
else:
- f.seek(adr_offset+36, 0)
+ f.seek(adr_offset + 36, 0)
# ADR's NgrEntries
entries = int.from_bytes(f.read(4), 'big', signed=True)
# ADR's MAXgrEntry
@@ -2430,59 +2431,59 @@ def _update_aedr_link(self, f, attrNum, zVar, varNum, offset):
# If this is the first entry, update the ADR to reflect
if zVar:
# AzEDRhead
- self._update_offset_value(f, adr_offset+48, 8, offset)
+ self._update_offset_value(f, adr_offset + 48, 8, offset)
# NzEntries
- self._update_offset_value(f, adr_offset+56, 4, 1)
+ self._update_offset_value(f, adr_offset + 56, 4, 1)
# MaxzEntry
- self._update_offset_value(f, adr_offset+60, 4, varNum)
+ self._update_offset_value(f, adr_offset + 60, 4, varNum)
else:
# AgrEDRhead
- self._update_offset_value(f, adr_offset+20, 8, offset)
+ self._update_offset_value(f, adr_offset + 20, 8, offset)
# NgrEntries
- self._update_offset_value(f, adr_offset+36, 4, 1)
+ self._update_offset_value(f, adr_offset + 36, 4, 1)
# MaxgrEntry
- self._update_offset_value(f, adr_offset+40, 4, varNum)
+ self._update_offset_value(f, adr_offset + 40, 4, varNum)
else:
if zVar:
- f.seek(adr_offset+48, 0)
+ f.seek(adr_offset + 48, 0)
head = int.from_bytes(f.read(8), 'big', signed=True)
else:
- f.seek(adr_offset+20, 0)
+ f.seek(adr_offset + 20, 0)
head = int.from_bytes(f.read(8), 'big', signed=True)
aedr = head
previous_aedr = head
done = False
# For each entry, re-adjust file offsets if needed
for _ in range(0, entries):
- f.seek(aedr+28, 0)
+ f.seek(aedr + 28, 0)
# Get variable number for entry
num = int.from_bytes(f.read(4), 'big', signed=True)
if (num > varNum):
# insert an aedr to the chain
# AEDRnext
- self._update_offset_value(f, previous_aedr+12, 8, offset)
+ self._update_offset_value(f, previous_aedr + 12, 8, offset)
# AEDRnext
- self._update_offset_value(f, offset+12, 8, aedr)
+ self._update_offset_value(f, offset + 12, 8, aedr)
done = True
break
else:
# move to the next aedr in chain
- f.seek(aedr+12, 0)
+ f.seek(aedr + 12, 0)
previous_aedr = aedr
aedr = int.from_bytes(f.read(8), 'big', signed=True)
# If no link was made, update the last found aedr
if not done:
- self._update_offset_value(f, previous_aedr+12, 8, offset)
+ self._update_offset_value(f, previous_aedr + 12, 8, offset)
if zVar:
- self._update_offset_value(f, adr_offset+56, 4, entries+1)
+ self._update_offset_value(f, adr_offset + 56, 4, entries + 1)
if (maxEntry < varNum):
- self._update_offset_value(f, adr_offset+60, 4, varNum)
+ self._update_offset_value(f, adr_offset + 60, 4, varNum)
else:
- self._update_offset_value(f, adr_offset+36, 4, entries+1)
+ self._update_offset_value(f, adr_offset + 36, 4, entries + 1)
if (maxEntry < varNum):
- self._update_offset_value(f, adr_offset+40, 4, varNum)
+ self._update_offset_value(f, adr_offset + 40, 4, varNum)
@staticmethod
def _set_bit(value, bit):
@@ -2559,7 +2560,7 @@ def _make_blocks(records):
# Find the location in the records before the next gap
# Call this value "y"
- while ((y+1) < total):
+ while ((y + 1) < total):
y = y + 1
nextnum = records[y]
diff = nextnum - recnum
@@ -2572,8 +2573,8 @@ def _make_blocks(records):
# Put the values of the records into "ablock", append to sparse_blocks
ablock = []
ablock.append(recstart)
- if ((y+1) == total):
- recend = records[total-1]
+ if ((y + 1) == total):
+ recend = records[total - 1]
else:
recend = records[y]
x = y + 1
@@ -2705,7 +2706,7 @@ def _make_sparse_blocks_with_virtual(self, variable, records, data):
asparse.append(sblock[0])
asparse.append(sblock[1])
starting = sblock[0]
- ending = sblock[1]+1
+ ending = sblock[1] + 1
asparse.append(data[starting:ending])
sparse_data.append(asparse)
return sparse_data
@@ -2719,8 +2720,8 @@ def _make_sparse_blocks_with_virtual(self, variable, records, data):
asparse = []
asparse.append(sblock[0])
asparse.append(sblock[1])
- starting = sblock[0]*y
- ending = (sblock[1]+1)*y
+ starting = sblock[0] * y
+ ending = (sblock[1] + 1) * y
asparse.append(data[starting:ending])
sparse_data.append(asparse)
return sparse_data
@@ -2734,7 +2735,7 @@ def _make_sparse_blocks_with_virtual(self, variable, records, data):
datax = []
ist = sblock[0]
for z in range(0, records):
- datax.append(data[ist+z])
+ datax.append(data[ist + z])
asparse.append(datax)
sparse_data.append(asparse)
return sparse_data
@@ -2802,6 +2803,6 @@ def getVersion():
where mon is a 3-character month.
"""
- print('CDFwrite version:', str(self.version) + '.'+str(self.release) +
+ print('CDFwrite version:', str(self.version) + '.' + str(self.release) +
'.' + str(self.increment))
print('Date: 2018/01/11')
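
The cdfwrite.py hunks above all normalize one low-level pattern: compute a fixed byte offset into an already-written record, then overwrite a big-endian integer field in place via `_update_offset_value`. A minimal standalone sketch of that pattern, assuming only a writable binary file; the helper name mirrors the `_update_offset_value` calls above, and the file name and offsets are hypothetical:

import struct

def update_offset_value(f, byte_loc, size, value):
    # Seek to a fixed offset and overwrite a 4-byte ('>i') or
    # 8-byte ('>q') big-endian field in place, matching the
    # struct.pack calls in the hunks above.
    f.seek(byte_loc, 0)
    f.write(struct.pack('>i' if size == 4 else '>q', value))

with open('example.cdf', 'r+b') as f:        # hypothetical file
    update_offset_value(f, 48, 8, 4096)      # e.g. link a new record into a chain
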
diff --git a/cdflib/epochs.py b/cdflib/epochs.py
index fda7a58..45efd1a 100644
--- a/cdflib/epochs.py
+++ b/cdflib/epochs.py
@@ -382,7 +382,7 @@ def encode_tt2000(tt2000, iso_8601: bool = True): # @NoSelf
# dd-mmm-yyyy hh:mm:ss.mmm.uuu.nnn
encoded = str(ld).zfill(2)
encoded += '-'
- encoded += CDFepoch.month_Token[lm-1]
+ encoded += CDFepoch.month_Token[lm - 1]
encoded += '-'
encoded += str(ly).zfill(4)
encoded += ' '
@@ -469,7 +469,6 @@ def breakdown_tt2000(tt2000, to_np: bool = False):
xdates[4, post72 & ~datxzero] = np.rint(xdates[5, post72 & ~datxzero] / 60.0)
xdates[5, post72 & ~datxzero] = xdates[5, post72 & ~datxzero] % 60
-
# Set toutcs, then loop through and correct for pre-1972
toutcs[:6, :] = xdates[:6, :]
@@ -490,7 +489,7 @@ def breakdown_tt2000(tt2000, to_np: bool = False):
dat0 = CDFepoch._LeapSecondsfromYMD(xdate[0],
xdate[1], xdate[2])
tmpx = t2 - int(dat0 * CDFepoch.SECinNanoSecs)
- tmpy = int(float(tmpx/CDFepoch.SECinNanoSecsD))
+ tmpy = int(float(tmpx / CDFepoch.SECinNanoSecsD))
nansec = int(tmpx - tmpy * CDFepoch.SECinNanoSecs)
if (nansec < 0):
nansec = CDFepoch.SECinNanoSecs + nansec
@@ -731,7 +730,7 @@ def _LeapSecondsfromJ2000(nanosecs): # @NoSelf
idxs = (j == -1) & (nanosecs >= CDFepoch.NST[i])
j[idxs] = i
if (i < (CDFepoch.NDAT - 1)):
- overflow = nanosecs + 1000000000 >= CDFepoch.NST[i+1]
+ overflow = nanosecs + 1000000000 >= CDFepoch.NST[i + 1]
da[overflow, 1] = 1.0
if np.all(j > 0):
break
@@ -797,7 +796,7 @@ def epochrange_tt2000(epochs, starttime=None, endtime=None): # @NoSelf
raise ValueError('Bad data')
else:
raise ValueError('Bad data')
- if (starttime == None):
+ if (starttime is None):
stime = int(-9223372036854775807)
else:
if (isinstance(starttime, int) or isinstance(starttime, np.int64)):
@@ -806,7 +805,7 @@ def epochrange_tt2000(epochs, starttime=None, endtime=None): # @NoSelf
stime = CDFepoch.compute_tt2000(starttime)
else:
raise ValueError('Bad start time')
- if (endtime != None):
+ if (endtime is not None):
if (isinstance(endtime, int) or isinstance(endtime, np.int64)):
etime = endtime
elif (isinstance(endtime, list) or isinstance(endtime, tuple)):
@@ -876,7 +875,7 @@ def _encodex_epoch16(epoch16, iso_8601: bool = True) -> str:
# dd-mmm-year hh:mm:ss.mmm.uuu.nnn.ppp
encoded = str(components[2]).zfill(2)
encoded += '-'
- encoded += CDFepoch.month_Token[components[1]-1]
+ encoded += CDFepoch.month_Token[components[1] - 1]
encoded += '-'
encoded += str(components[0]).zfill(4)
encoded += ' '
@@ -1057,7 +1056,7 @@ def compute_epoch16(datetimes, to_np: bool = False):
usec, nsec, psec)
if (month == 0):
daysSince0AD = CDFepoch._JulianDay(
- year, 1, 1) + (day-1) - 1721060
+ year, 1, 1) + (day - 1) - 1721060
else:
daysSince0AD = CDFepoch._JulianDay(
year, month, day) - 1721060
@@ -1175,7 +1174,7 @@ def breakdown_epoch16(epochs, to_np: bool = False):
"""
if (isinstance(epochs, (complex, np.complex128))
- or isinstance(epochs, (list, tuple, np.ndarray))):
+ or isinstance(epochs, (list, tuple, np.ndarray))):
new_epochs = np.asarray(epochs)
if new_epochs.shape == ():
cshape = []
@@ -1210,7 +1209,7 @@ def breakdown_epoch16(epochs, to_np: bool = False):
def _computeEpoch16(y, m, d, h, mn, s, ms, msu, msn, msp): # @NoSelf
if (m == 0):
- daysSince0AD = CDFepoch._JulianDay(y, 1, 1) + (d-1) - 1721060
+ daysSince0AD = CDFepoch._JulianDay(y, 1, 1) + (d - 1) - 1721060
else:
if (m < 0):
y = y - 1
@@ -1258,7 +1257,7 @@ def epochrange_epoch16(epochs, starttime=None, endtime=None): # @NoSelf
raise ValueError('Bad data')
else:
raise ValueError('Bad data')
- if (starttime == None):
+ if (starttime is None):
stime = []
stime.append(-1.0E31)
stime.append(-1.0E31)
@@ -1275,7 +1274,7 @@ def epochrange_epoch16(epochs, starttime=None, endtime=None): # @NoSelf
stime.append(sstime.imag)
else:
raise ValueError('Bad start time')
- if (endtime != None):
+ if (endtime is not None):
if (isinstance(endtime, complex) or
isinstance(endtime, np.complex128)):
etime = []
@@ -1304,21 +1303,21 @@ def epochrange_epoch16(epochs, starttime=None, endtime=None): # @NoSelf
if (epoch16[0] > etime[0] or (epoch16[0] == etime[0] and
epoch16[1] > etime[1])):
return
- if (epoch16[count-2] < stime[0] or
- (epoch16[count-2] == stime[0] and
- epoch16[count-1] < stime[1])):
+ if (epoch16[count - 2] < stime[0] or
+ (epoch16[count - 2] == stime[0] and
+ epoch16[count - 1] < stime[1])):
return
for x in range(0, count, 2):
if (epoch16[x] < stime[0]):
continue
elif (epoch16[x] == stime[0]):
- if (epoch16[x+1] < stime[1]):
+ if (epoch16[x + 1] < stime[1]):
continue
else:
- indx.append(int(x/2))
+ indx.append(int(x / 2))
break
else:
- indx.append(int(x/2))
+ indx.append(int(x / 2))
break
if (len(indx) == 0):
indx.append(0)
@@ -1327,17 +1326,17 @@ def epochrange_epoch16(epochs, starttime=None, endtime=None): # @NoSelf
if (epoch16[x] < etime[0]):
continue
elif (epoch16[x] == etime[0]):
- if (epoch16[x+1] > etime[1]):
- indx.append(int((x-1)/2))
+ if (epoch16[x + 1] > etime[1]):
+ indx.append(int((x - 1) / 2))
hasadded = True
break
else:
- indx.append(int((x-1)/2))
+ indx.append(int((x - 1) / 2))
hasadded = True
break
if not hasadded:
- indx.append(int(count/2)-1)
- return np.arange(indx[0], indx[1]+1, step=1)
+ indx.append(int(count / 2) - 1)
+ return np.arange(indx[0], indx[1] + 1, step=1)
@staticmethod
def encode_epoch(epochs, iso_8601: bool = True): # @NoSelf
@@ -1388,7 +1387,7 @@ def _encodex_epoch(epoch, iso_8601: bool = True): # @NoSelf
# dd-mmm-year hh:mm:ss.mmm
encoded = str(components[2]).zfill(2)
encoded += '-'
- encoded += CDFepoch.month_Token[components[1]-1]
+ encoded += CDFepoch.month_Token[components[1] - 1]
encoded += '-'
encoded += str(components[0]).zfill(4)
encoded += ' '
@@ -1487,7 +1486,7 @@ def compute_epoch(dates, to_np: bool = False): # @NoSelf
minute, second, msec))
if (month == 0):
- daysSince0AD = CDFepoch._JulianDay(year, 1, 1) + (day-1) - 1721060
+ daysSince0AD = CDFepoch._JulianDay(year, 1, 1) + (day - 1) - 1721060
else:
daysSince0AD = CDFepoch._JulianDay(year, month, day) - 1721060
if (hour == 0 and minute == 0 and second == 0):
@@ -1496,10 +1495,10 @@ def compute_epoch(dates, to_np: bool = False): # @NoSelf
msecInDay = (3600000 * hour) + (60000 * minute) + (1000 * second) + msec
if (count == 1):
if not to_np:
- return (86400000.0*daysSince0AD+msecInDay)
+ return (86400000.0 * daysSince0AD + msecInDay)
else:
- return np.array(86400000.0*daysSince0AD+msecInDay)
- epochs.append(86400000.0*daysSince0AD+msecInDay)
+ return np.array(86400000.0 * daysSince0AD + msecInDay)
+ epochs.append(86400000.0 * daysSince0AD + msecInDay)
if not to_np:
return epochs
else:
@@ -1508,7 +1507,7 @@ def compute_epoch(dates, to_np: bool = False): # @NoSelf
def _computeEpoch(y, m, d, h, mn, s, ms): # @NoSelf
if (m == 0):
- daysSince0AD = CDFepoch._JulianDay(y, 1, 1) + (d-1) - 1721060
+ daysSince0AD = CDFepoch._JulianDay(y, 1, 1) + (d - 1) - 1721060
else:
if (m < 0):
--y
@@ -1550,7 +1549,7 @@ def breakdown_epoch(epochs, to_np: bool = False):
# Test input and cast it as an array of floats
if (isinstance(epochs, float) or isinstance(epochs, np.float64)
or isinstance(epochs, list) or isinstance(epochs, tuple)
- or isinstance(epochs, np.ndarray) or isinstance(epochs, int)):
+ or isinstance(epochs, np.ndarray) or isinstance(epochs, int)):
new_epochs = np.asarray(epochs).astype(float)
if new_epochs.shape == ():
cshape = []
@@ -1710,7 +1709,7 @@ def _parse_epoch(value): # @NoSelf
# CDF_EPOCH16
if value.lower() in ('31-dec-9999 23:59:59.999.999.999.999',
'9999-12-31t23:59:59.999999999999'):
- return -1.0E31-1.0E31j
+ return -1.0E31 - 1.0E31j
else:
if (len(value) == 36):
date = re.findall(r'(\d+)\-(.+)\-(\d+) (\d+)\:(\d+)\:(\d+)\.(\d+)\.(\d+)\.(\d+)\.(\d+)',
@@ -1816,7 +1815,7 @@ def getVersion(): # @NoSelf
Prints the code version.
"""
print('epochs version:', str(CDFepoch.version) + '.' +
- str(CDFepoch.release) + '.'+str(CDFepoch.increment))
+ str(CDFepoch.release) + '.' + str(CDFepoch.increment))
def getLeapSecondLastUpdated(): # @NoSelf
"""
diff --git a/cdflib/epochs_astropy.py b/cdflib/epochs_astropy.py
index bc92c83..adcb75b 100644
--- a/cdflib/epochs_astropy.py
+++ b/cdflib/epochs_astropy.py
@@ -78,7 +78,7 @@ def convert_to_astropy(epochs, format=None):
if t in (int, np.int64):
return Time(epochs, format='cdf_tt2000', precision=9)
elif t in (complex, np.complex128):
- return Time(epochs.real, epochs.imag/1000000000000.0, format='cdf_epoch16', precision=9)
+ return Time(epochs.real, epochs.imag / 1000000000000.0, format='cdf_epoch16', precision=9)
elif t in (float, np.float64):
return Time(epochs, format='cdf_epoch', precision=9)
else:
@@ -131,15 +131,15 @@ def compute(datetimes, to_np: bool = False): # @NoSelf
for d in datetimes:
unix_seconds = datetime.datetime(d[0], d[1], d[2], d[3], d[4], d[5]).replace(tzinfo=timezone.utc).timestamp()
if len(d) == 7:
- remainder_seconds = (d[6]/1000.0)
+ remainder_seconds = (d[6] / 1000.0)
astrotime = Time(unix_seconds, remainder_seconds, format='unix', precision=9)
cdf_time.append(astrotime.cdf_epoch)
if len(d) == 9:
- remainder_seconds = (d[6]/1000.0) + (d[7]/1000000.0) + (d[8]/1000000000.0)
+ remainder_seconds = (d[6] / 1000.0) + (d[7] / 1000000.0) + (d[8] / 1000000000.0)
astrotime = Time(unix_seconds, remainder_seconds, format='unix', precision=9)
cdf_time.append(astrotime.cdf_tt2000)
if len(d) == 10:
- remainder_seconds = (d[6]/1000.0) + (d[7]/1000000.0) + (d[8]/1000000000.0) + (d[9]/1000000000000.0)
+ remainder_seconds = (d[6] / 1000.0) + (d[7] / 1000000.0) + (d[8] / 1000000000.0) + (d[9] / 1000000000000.0)
astrotime = Time(unix_seconds, remainder_seconds, format='unix', precision=9)
cdf_time.append(astrotime.cdf_epoch16)
if to_np:
@@ -170,7 +170,7 @@ def breakdown_tt2000(tt2000, to_np: bool = False):
date, time = t.split(" ")
yyyy, mon, dd = date.split("-")
hhmmss, decimal_seconds = time.split(".")
- decimal_seconds = "."+decimal_seconds
+ decimal_seconds = "." + decimal_seconds
hh, mm, ss = hhmmss.split(":")
time_as_list = []
time_as_list.append(int(yyyy)) # year
@@ -180,7 +180,7 @@ def breakdown_tt2000(tt2000, to_np: bool = False):
time_as_list.append(int(mm)) # minute
time_as_list.append(int(ss)) # second
decimal_seconds = float(decimal_seconds)
- milliseconds = decimal_seconds*1000
+ milliseconds = decimal_seconds * 1000
time_as_list.append(int(milliseconds)) # milliseconds
microseconds = (milliseconds % 1) * 1000
time_as_list.append(int(microseconds)) # microseconds
@@ -211,7 +211,7 @@ def breakdown_epoch16(epochs, to_np: bool = False): # @NoSelf
time_as_list.append(int(mm)) # minute
time_as_list.append(int(ss)) # second
decimal_seconds = float(decimal_seconds)
- milliseconds = decimal_seconds*1000
+ milliseconds = decimal_seconds * 1000
time_as_list.append(int(milliseconds)) # milliseconds
microseconds = (milliseconds % 1) * 1000
time_as_list.append(int(microseconds)) # microseconds
@@ -243,7 +243,7 @@ def breakdown_epoch(epochs, to_np: bool = False): # @NoSelf
time_as_list.append(int(mm)) # minute
time_as_list.append(int(ss)) # second
decimal_seconds = float(decimal_seconds)
- milliseconds = decimal_seconds*1000
+ milliseconds = decimal_seconds * 1000
time_as_list.append(int(milliseconds)) # milliseconds
times.append(time_as_list)
return times
@@ -294,4 +294,4 @@ def getVersion():
Prints the code version.
"""
print('epochs version:', str(CDFAstropy.version) + '.' +
- str(CDFAstropy.release) + '.'+str(CDFAstropy.increment))
+ str(CDFAstropy.release) + '.' + str(CDFAstropy.increment))
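
For context on the two-argument Time construction reflowed above: astropy accepts the whole unix seconds and a small fractional remainder separately to preserve precision, and cdflib's custom time formats expose the result as CDF epoch values. A minimal sketch, assuming the cdf_* formats are registered when cdflib.epochs_astropy is imported; the timestamp values are hypothetical:

from astropy.time import Time
import cdflib.epochs_astropy  # noqa: F401 -- defines the cdf_* Time formats

unix_seconds = 1445000614          # hypothetical whole-second timestamp
remainder_seconds = 123 / 1000.0   # milliseconds carried separately, as in compute() above
t = Time(unix_seconds, remainder_seconds, format='unix', precision=9)
print(t.cdf_epoch)                 # the same attribute compute() appends above
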
diff --git a/cdflib/xarray_to_cdf.py b/cdflib/xarray_to_cdf.py
index ae7b237..0cc6bc6 100644
--- a/cdflib/xarray_to_cdf.py
+++ b/cdflib/xarray_to_cdf.py
@@ -13,19 +13,19 @@ def _dtype_to_cdf_type(var):
epoch_regex_1 = re.compile('epoch$')
epoch_regex_2 = re.compile('epoch_[0-9]+$')
if epoch_regex_1.match(var.name.lower()) or epoch_regex_2.match(var.name.lower()):
- return 33, 1 # CDF_EPOCH_TT2000
+ return 33, 1 # CDF_TIME_TT2000
if var.dtype == np.int8 or var.dtype == np.int16 or var.dtype == np.int32 or var.dtype == np.int64:
- return 8, 1 #'CDF_INT8'
+ return 8, 1 # 'CDF_INT8'
elif var.dtype == np.float64 or var.dtype == np.float32 or var.dtype == np.float16:
- return 45, 1 #'CDF_DOUBLE'
+ return 45, 1 # 'CDF_DOUBLE'
elif var.dtype == np.uint8 or var.dtype == np.uint16 or var.dtype == np.uint32 or var.dtype == np.uint64:
- return 14, 1 #'CDF_UNIT4'
+ return 14, 1 # 'CDF_UINT4'
elif var.dtype.type == np.str_:
- return 51, int(var.dtype.str[2:]) # CDF_CHAR, and the length of the longest string in the numpy array
- elif var.dtype.type == np.bytes_: # Bytes are usually strings
- return 51, int(var.dtype.str[2:]) # CDF_CHAR, and the length of the longest string in the numpy array
- elif var.dtype == np.object: # This commonly means we have multidimensional arrays of strings
+ return 51, int(var.dtype.str[2:]) # CDF_CHAR, and the length of the longest string in the numpy array
+ elif var.dtype.type == np.bytes_: # Bytes are usually strings
+ return 51, int(var.dtype.str[2:]) # CDF_CHAR, and the length of the longest string in the numpy array
+ elif var.dtype == np.object: # This commonly means we have multidimensional arrays of strings
try:
longest_string = 0
for x in np.nditer(var.data, flags=['refs_ok']):
@@ -33,7 +33,8 @@ def _dtype_to_cdf_type(var):
longest_string = len(str(x))
return 51, longest_string
except Exception as e:
- print(f'NOT SUPPORTED: Data in variable {var.name} has data type {var.dtype}. Attempting to convert it to strings ran into the error: {str(e)}')
+ print(
+ f'NOT SUPPORTED: Data in variable {var.name} has data type {var.dtype}. Attempting to convert it to strings ran into the error: {str(e)}')
return 51, 1
elif var.dtype.type == np.datetime64:
return 33, 1
@@ -41,16 +42,17 @@ def _dtype_to_cdf_type(var):
print(f'NOT SUPPORTED: Data in variable {var.name} has data type of {var.dtype}.')
return 51, 1
+
def _dtype_to_fillval(dtype):
if dtype == np.int8 or dtype == np.int16 or dtype == np.int32 or dtype == np.int64:
- return -9223372036854775808 # Default FILLVAL of 'CDF_INT8'
+ return -9223372036854775808 # Default FILLVAL of 'CDF_INT8'
elif dtype == np.float64 or dtype == np.float32 or dtype == np.float16:
- return -1e30 #Default FILLVAL of 'CDF_DOUBLE'
+ return -1e30 # Default FILLVAL of 'CDF_DOUBLE'
elif dtype == np.uint8 or dtype == np.uint16 or dtype == np.uint32 or dtype == np.uint64:
- return 4294967294 #Default FILLVAL of 'CDF_UNIT4'
+ return 4294967294 # Default FILLVAL of 'CDF_UINT4'
elif dtype.type == np.str_:
- return " " # Default FILLVAL of 'CDF_CHAR'
+ return " " # Default FILLVAL of 'CDF_CHAR'
else:
print(f'Data type of {dtype} not supported')
@@ -63,20 +65,24 @@ def _verify_depend_dimensions(dataset, dimension_number, primary_variable_name,
coordinate_data = np.array(dataset[coordinate_variable_name])
if len(primary_data.shape) != 0 and len(coordinate_data.shape) == 0:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
return False
if len(coordinate_data.shape) != 0 and len(primary_data.shape) == 0:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
return False
if len(coordinate_data.shape) > 2:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} has too many dimensions to be the DEPEND_{dimension_number} for variable {primary_variable_name}')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} has too many dimensions to be the DEPEND_{dimension_number} for variable {primary_variable_name}')
return False
if len(coordinate_data.shape) == 2:
if primary_data.shape[0] != coordinate_data.shape[0]:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the Epoch dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the Epoch dimensions do not match.')
return False
if len(primary_data.shape) <= dimension_number:
@@ -84,7 +90,8 @@ def _verify_depend_dimensions(dataset, dimension_number, primary_variable_name,
return False
if primary_data.shape[dimension_number] != coordinate_data.shape[-1]:
- print(f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
+ print(
+ f'ISTP Compliance Warning: {coordinate_variable_name} is listed as the DEPEND_{dimension_number} for variable {primary_variable_name}, but the dimensions do not match.')
return False
return True
@@ -111,12 +118,14 @@ def _dimension_checker(dataset):
# Determine the variable type (data, support_data, metadata, ignore_data)
if 'VAR_TYPE' not in dataset[var].attrs:
- print(f'ISTP Compliance Warning: Variable {var} does not have an attribute VAR_TYPE to describe the variable. Attributes must be either data, support_data, metadata, or ignore_data.')
+ print(
+ f'ISTP Compliance Warning: Variable {var} does not have an attribute VAR_TYPE to describe the variable. Attributes must be either data, support_data, metadata, or ignore_data.')
var_type = None
else:
var_type = dataset[var].attrs['VAR_TYPE']
if var_type.lower() not in ('data', 'support_data', 'metadata', 'ignore_data'):
- print(f'ISTP Compliance Warning: Variable {var} attribute VAR_TYPE is not set to either data, support_data, metadata, or ignore_data.')
+ print(
+ f'ISTP Compliance Warning: Variable {var} attribute VAR_TYPE is not set to either data, support_data, metadata, or ignore_data.')
var_type = None
# Determine ISTP compliant variables
@@ -127,18 +136,20 @@ def _dimension_checker(dataset):
if _verify_depend_dimensions(dataset, int(att[-1]), var, depend_i):
istp_depend_dimension_list.append(dataset[var].attrs[att])
else:
- print(f'ISTP Compliance Warning: variable {var} listed {dataset[var].attrs[att]} as its {att}. However, it was not found in the dataset.')
+ print(
+ f'ISTP Compliance Warning: variable {var} listed {dataset[var].attrs[att]} as its {att}. However, it was not found in the dataset.')
# Determine potential dimension (non-epoch) variables
potential_depend_dims = dataset[var].dims[1:]
i = 1
for d in potential_depend_dims:
depend_dimension_list.append(d)
- if d not in dataset: # Check if the dimension is in the coordinates themselves
+ if d not in dataset: # Check if the dimension is in the coordinates themselves
if var_type is not None and var_type.lower() == 'data':
if f'DEPEND_{i}' not in dataset[var].attrs:
- print(f'ISTP Compliance Warning: variable {var} contains a dimension {d} that is not defined in xarray. '
- f'Specify one of the other xarray DataArrays as the DEPEND_{i} attribute.')
+ print(
+ f'ISTP Compliance Warning: variable {var} contains a dimension {d} that is not defined in xarray. '
+ f'Specify one of the other xarray DataArrays as the DEPEND_{i} attribute.')
i += 1
depend_dimension_list = list(set(depend_dimension_list))
@@ -163,6 +174,7 @@ def _recheck_dimensions_after_epoch_checker(dataset, time_varying_dimensions, di
return depend_dimension_list
+
def _epoch_checker(dataset, dim_vars):
# This holds the list of epoch variables
@@ -207,12 +219,14 @@ def _epoch_checker(dataset, dim_vars):
depend_0_list.append(potential_depend_0)
time_varying_dimensions.append(var)
else:
- print(f'ISTP Compliance Warning: variable {var} contained a "record" dimension {potential_depend_0}, but they have different dimensions.')
+ print(
+ f'ISTP Compliance Warning: variable {var} contained a "record" dimension {potential_depend_0}, but they have different dimensions.')
elif epoch_regex_1.match(var.lower()) or epoch_regex_2.match(var.lower()):
depend_0_list.append(potential_depend_0)
time_varying_dimensions.append(var)
else:
- print(f'ISTP Compliance Warning: variable {var} contained an "record" dimension {potential_depend_0}, but it was not found in the data set.')
+ print(
+ f'ISTP Compliance Warning: variable {var} contained a "record" dimension {potential_depend_0}, but it was not found in the dataset.')
depend_0_list = list(set(depend_0_list))
@@ -222,7 +236,7 @@ def _epoch_checker(dataset, dim_vars):
epoch_found = False
for d in depend_0_list:
if d.lower().startswith('epoch'):
- epoch_found=True
+ epoch_found = True
if not epoch_found:
print(f'ISTP Compliance Warning: There is no variable named Epoch. Epoch is the required name of a DEPEND_0 attribute.')
@@ -242,12 +256,12 @@ def _add_depend_variables_to_dataset(dataset, dim_vars, depend_0_vars, time_vary
if var in time_varying_dimensions:
if 'DEPEND_0' not in dataset[var].attrs:
depend_0 = dataset[var].dims[0]
- #if 'VAR_TYPE' in dataset[var].attrs and dataset[var].attrs['VAR_TYPE'].lower() == 'data':
+ # if 'VAR_TYPE' in dataset[var].attrs and dataset[var].attrs['VAR_TYPE'].lower() == 'data':
# depend_0 = first_dim_name
- #elif 'VAR_TYPE' in dataset[var].attrs and dataset[var].attrs['VAR_TYPE'].lower() == 'support_data' and len(
+ # elif 'VAR_TYPE' in dataset[var].attrs and dataset[var].attrs['VAR_TYPE'].lower() == 'support_data' and len(
# dataset[var].dims) > 1:
# depend_0 = first_dim_name
- #else:
+ # else:
# depend_0 = None
if depend_0 is not None and depend_0 in depend_0_vars and var != depend_0:
@@ -338,7 +352,8 @@ def _variable_attribute_checker(dataset, epoch_list):
if d[var].attrs['LABL_PTR_1'] in dataset or d[var].attrs['LABL_PTR_1'] in dataset.coords:
pass
else:
- print(f'ISTP Compliance Warning: LABL_PTR_1 attribute for variable {var} does not point to an existing variable.')
+ print(
+ f'ISTP Compliance Warning: LABL_PTR_1 attribute for variable {var} does not point to an existing variable.')
else:
print(f'ISTP Compliance Warning: LABLAXIS or LABL_PTR_1 attribute is required for variable {var}')
@@ -348,7 +363,8 @@ def _variable_attribute_checker(dataset, epoch_list):
print(f'ISTP Compliance Warning: UNITS or UNIT_PTR attribute is required for variable {var}')
else:
if d[var].attrs['UNIT_PTR'] not in dataset:
- print(f'ISTP Compliance Warning: UNIT_PTR attribute for variable {var} does not point to an existing variable.')
+ print(
+ f'ISTP Compliance Warning: UNIT_PTR attribute for variable {var} does not point to an existing variable.')
if 'VALIDMIN' not in d[var].attrs:
if var_type.lower() == 'data':
@@ -427,7 +443,7 @@ def _unixtime_to_tt2000(unixtime_data):
dt.hour,
dt.minute,
dt.second,
- int(dt.microsecond/1000),
+ int(dt.microsecond / 1000),
int(dt.microsecond % 1000),
0]
converted_data = cdfepoch.compute(dt_to_convert)
@@ -449,14 +465,15 @@ def _datetime_to_tt2000(datetime_data):
dd.hour,
dd.minute,
dd.second,
- int(dd.microsecond/1000),
+ int(dd.microsecond / 1000),
int(dd.microsecond % 1000),
0]
np.append(tt2000_data, cdfepoch.compute(dd_to_convert))
return tt2000_data
-def xarray_to_cdf(xarray_dataset, file_name, from_unixtime=False, from_datetime=False, istp=True, record_dimensions=[], compression=0):
+def xarray_to_cdf(xarray_dataset, file_name, from_unixtime=False, from_datetime=False,
+ istp=True, record_dimensions=[], compression=0):
"""
This function converts XArray Dataset objects into CDF files.
@@ -655,7 +672,7 @@ def xarray_to_cdf(xarray_dataset, file_name, from_unixtime=False, from_datetime=
elif from_datetime:
var_data = _datetime_to_tt2000(d[var].data)
elif cdf_data_type == 33:
- unixtime_from_datetime64 = d[var].data.astype('int64')/1000000000.0
+ unixtime_from_datetime64 = d[var].data.astype('int64') / 1000000000.0
var_data = _unixtime_to_tt2000(unixtime_from_datetime64)
x.write_var(var_spec, var_attrs=var_att_dict, var_data=var_data)
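
The signature re-wrapped above is the public entry point exercised throughout the deleted scratch.py below. A minimal round-trip sketch under the same conventions; the dataset contents, attribute values, and file name here are hypothetical:

import numpy as np
import xarray as xr
import cdflib

ds = xr.Dataset(
    {'Flux': ('Epoch', np.array([1.0, 2.0, 3.0]))},
    coords={'Epoch': ('Epoch', np.array([0.0, 60.0, 120.0]))},  # unix seconds
)
ds['Flux'].attrs['VAR_TYPE'] = 'data'            # checked by the ISTP warnings above
ds['Epoch'].attrs['VAR_TYPE'] = 'support_data'
cdflib.xarray_to_cdf(ds, 'example.cdf', from_unixtime=True, istp=True)
back = cdflib.cdf_to_xarray('example.cdf', to_unixtime=True, fillval_to_nan=True)
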
diff --git a/doc/introduction.rst b/doc/introduction.rst
index 2ef7818..a2153c6 100644
--- a/doc/introduction.rst
+++ b/doc/introduction.rst
@@ -4,11 +4,11 @@ Introduction
What is cdflib?
------------------
-cdflib is an effort to replicate the CDF libraries using a pure python implementation. This means users do not need to install the `CDF NASA libraries `_.
+cdflib is an effort to replicate the CDF libraries using a pure Python implementation. This means users do not need to install the `CDF NASA libraries `_.
The only module you need to install is ``numpy``, but there are a few things you can do with ``astropy`` and ``xarray``.
-While this origally started as a way to read PDS-archive compliant CDF files, thanks to many contributors, it has grown to be able to handle every type of CDF file.
+While this originally started as a way to read PDS-archive compliant CDF files, thanks to many contributors, it has grown to be able to handle every type of CDF file.
What can cdflib do?
diff --git a/doc/modules/apis.rst b/doc/modules/apis.rst
index e31abc8..2ca8c91 100644
--- a/doc/modules/apis.rst
+++ b/doc/modules/apis.rst
@@ -13,4 +13,4 @@ The following documentation is an auto-generated summary of cdflib's API. For m
:no-inheritance-diagram:
.. automodapi:: cdflib.epochs_astropy
- :no-inheritance-diagram:
\ No newline at end of file
+ :no-inheritance-diagram:
diff --git a/doc/modules/cdflib.rst b/doc/modules/cdflib.rst
index 780543f..9b62ea4 100644
--- a/doc/modules/cdflib.rst
+++ b/doc/modules/cdflib.rst
@@ -1,4 +1,4 @@
CDF
===
-.. autofunction:: cdflib.CDF
\ No newline at end of file
+.. autofunction:: cdflib.CDF
diff --git a/doc/modules/cdfread.rst b/doc/modules/cdfread.rst
index b0c14b5..9c52bfb 100644
--- a/doc/modules/cdfread.rst
+++ b/doc/modules/cdfread.rst
@@ -143,4 +143,4 @@ Get epoch range. Returns ``list()`` of the record numbers, representing the corr
getVersion()
-------------
-Shows the code version
\ No newline at end of file
+Shows the code version
diff --git a/doc/modules/cdfwrite.rst b/doc/modules/cdfwrite.rst
index 0aab1ba..193d22b 100644
--- a/doc/modules/cdfwrite.rst
+++ b/doc/modules/cdfwrite.rst
@@ -200,5 +200,3 @@ Sample Usage
>>> var_data=[varrecs,vardata])
>>> cdf_master.close()
>>> cdf_file.close()
-
-
diff --git a/pyproject.toml b/pyproject.toml
index 2f2c683..7ab2e34 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
[build-system]
-requires = ["setuptools", "wheel"]
+requires = ["setuptools!=61.0.0", "wheel"]
[tool.black]
line-length = 132
diff --git a/scratch.py b/scratch.py
deleted file mode 100644
index d02c244..0000000
--- a/scratch.py
+++ /dev/null
@@ -1,524 +0,0 @@
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-import urllib.request
-fname = 'mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.cdf'
-url = ("https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.cdf")
-if not os.path.exists(fname):
- urllib.request.urlretrieve(url, fname)
-
-
-a = cdflib.cdf_to_xarray("mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.cdf",
- to_unixtime=True, fillval_to_nan=True)
-
-cdflib.xarray_to_cdf(a, 'mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-cdf-input.cdf',
- from_unixtime=True)
-b = cdflib.cdf_to_xarray('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-cdf-input.cdf',
- to_unixtime=True, fillval_to_nan=True)
-os.remove('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-cdf-input.cdf')
-os.remove('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.cdf')
-
-fname = 'mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.nc'
-url = (
- "https://lasp.colorado.edu/maven/sdc/public/data/sdc/web/cdflib_testing/mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.nc")
-if not os.path.exists(fname):
- urllib.request.urlretrieve(url, fname)
-
-c = xr.load_dataset("mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.nc")
-
-cdflib.xarray_to_cdf(c, 'mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-netcdf-input.cdf',
- to_unixtime=True, fillval_to_nan=True)
-os.remove('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-netcdf-input.cdf')
-os.remove('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.nc')
-
-'''
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0.nc")
-cdflib.xarray_to_cdf(c, 'mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mms1_fpi_brst_l2_des-moms_20151016130334_v3.3.0-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mms2_epd-eis_srvy_l2_extof_20160809_v3.0.4.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mms2_epd-eis_srvy_l2_extof_20160809_v3.0.4-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mms2_epd-eis_srvy_l2_extof_20160809_v3.0.4-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mms2_epd-eis_srvy_l2_extof_20160809_v3.0.4-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mms2_epd-eis_srvy_l2_extof_20160809_v3.0.4.nc")
-cdflib.xarray_to_cdf(c, 'mms2_epd-eis_srvy_l2_extof_20160809_v3.0.4-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mms2_epd-eis_srvy_l2_extof_20160809_v3.0.4-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mms2_epd-eis_srvy_l2_extof_20160809_v3.0.4-created-from-netcdf-input.cdf')
-#
-
-#
-
-### GOOD ONE FOR DEMO?!
-
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mms2_fgm_srvy_l2_20160809_v4.47.0.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mms2_fgm_srvy_l2_20160809_v4.47.0-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mms2_fgm_srvy_l2_20160809_v4.47.0-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mms2_fgm_srvy_l2_20160809_v4.47.0-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mms2_fgm_srvy_l2_20160809_v4.47.0.nc")
-cdflib.xarray_to_cdf(c, 'mms2_fgm_srvy_l2_20160809_v4.47.0-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mms2_fgm_srvy_l2_20160809_v4.47.0-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mms2_fgm_srvy_l2_20160809_v4.47.0-created-from-netcdf-input.cdf')
-#
-
-#
-
-### REALLY GOOD EXAMPLE
-
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-c = xr.load_dataset("C:/Work/cdf_test_files/MGITM_LS180_F130_150615.nc")
-for var in c:
- c[var].attrs['VAR_TYPE'] = 'data'
-c = c.rename({'Latitude':'latitude', 'Longitude':'longitude'})
-c['longitude'].attrs['VAR_TYPE'] = 'support_data'
-c['latitude'].attrs['VAR_TYPE'] = 'support_data'
-c['altitude'].attrs['VAR_TYPE'] = 'support_data'
-
-cdflib.xarray_to_cdf(c, 'MGITM_LS180_F130_150615-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('MGITM_LS180_F130_150615-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('MGITM_LS180_F130_150615-created-from-netcdf-input.cdf')
-#
-
-#
-
-### ANOTHER REALLY GOOD EXAMPLE
-
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-c = xr.load_dataset("C:/Work/cdf_test_files/dn_magn-l2-hires_g17_d20211219_v1-0-1.nc")
-for var in c:
- c[var].attrs['VAR_TYPE'] = 'data'
-c['coordinate'].attrs['VAR_TYPE'] = 'support_data'
-c['time'].attrs['VAR_TYPE'] = 'support_data'
-c['time_orbit'].attrs['VAR_TYPE'] = 'support_data'
-cdflib.xarray_to_cdf(c, 'dn_magn-l2-hires_g17_d20211219_v1-0-1-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('dn_magn-l2-hires_g17_d20211219_v1-0-1-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('dn_magn-l2-hires_g17_d20211219_v1-0-1-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-c = xr.load_dataset("C:/Work/cdf_test_files/SABER_L2B_2021020_103692_02.07.nc")
-for var in c:
- c[var].attrs['VAR_TYPE'] = 'data'
-c['event'].attrs['VAR_TYPE'] = 'support_data'
-c['sclatitude'].attrs['VAR_TYPE'] = 'support_data'
-c['sclongitude'].attrs['VAR_TYPE'] = 'support_data'
-c['scaltitude'].attrs['VAR_TYPE'] = 'support_data'
-cdflib.xarray_to_cdf(c, 'SABER_L2B_2021020_103692_02.07-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('SABER_L2B_2021020_103692_02.07-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('SABER_L2B_2021020_103692_02.07-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_euv_l3_minute_20201130_v14_r02.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_euv_l3_minute_20201130_v14_r02-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_euv_l3_minute_20201130_v14_r02-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_euv_l3_minute_20201130_v14_r02-created-from-cdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_lpw_l2_lpiv_20180717_v02_r02.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_lpw_l2_lpiv_20180717_v02_r02-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_lpw_l2_lpiv_20180717_v02_r02-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_lpw_l2_lpiv_20180717_v02_r02-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mvn_lpw_l2_lpiv_20180717_v02_r02.nc")
-cdflib.xarray_to_cdf(c, 'mvn_lpw_l2_lpiv_20180717_v02_r02-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mvn_lpw_l2_lpiv_20180717_v02_r02-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_lpw_l2_lpiv_20180717_v02_r02-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_lpw_l2_lpnt_20180717_v03_r01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_lpw_l2_lpnt_20180717_v03_r01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_lpw_l2_lpnt_20180717_v03_r01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_lpw_l2_lpnt_20180717_v03_r01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mvn_lpw_l2_lpnt_20180717_v03_r01.nc")
-cdflib.xarray_to_cdf(c, 'mvn_lpw_l2_lpnt_20180717_v03_r01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mvn_lpw_l2_lpnt_20180717_v03_r01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_lpw_l2_lpnt_20180717_v03_r01-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_lpw_l2_mrgscpot_20180717_v02_r01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_lpw_l2_mrgscpot_20180717_v02_r01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_lpw_l2_mrgscpot_20180717_v02_r01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_lpw_l2_mrgscpot_20180717_v02_r01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mvn_lpw_l2_mrgscpot_20180717_v02_r01.nc")
-cdflib.xarray_to_cdf(c, 'mvn_lpw_l2_mrgscpot_20180717_v02_r01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mvn_lpw_l2_mrgscpot_20180717_v02_r01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_lpw_l2_mrgscpot_20180717_v02_r01-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_sep_l2_anc_20210501_v06_r00.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_sep_l2_anc_20210501_v06_r00-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_sep_l2_anc_20210501_v06_r00-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_sep_l2_anc_20210501_v06_r00-created-from-cdf-input.cdf')
-#
-
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_sep_l2_s2-raw-svy-full_20191231_v04_r05.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_sep_l2_s2-raw-svy-full_20191231_v04_r05-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_sep_l2_s2-raw-svy-full_20191231_v04_r05-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_sep_l2_s2-raw-svy-full_20191231_v04_r05-created-from-cdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_sta_l2_d1-32e4d16a8m_20201130_v02_r04.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_sta_l2_d1-32e4d16a8m_20201130_v02_r04-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_sta_l2_d1-32e4d16a8m_20201130_v02_r04-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_sta_l2_d1-32e4d16a8m_20201130_v02_r04-created-from-cdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_swe_l2_arc3d_20180717_v04_r02.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_swe_l2_arc3d_20180717_v04_r02-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_swe_l2_arc3d_20180717_v04_r02-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_swe_l2_arc3d_20180717_v04_r02-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mvn_swe_l2_arc3d_20180717_v04_r02.nc")
-cdflib.xarray_to_cdf(c, 'mvn_swe_l2_arc3d_20180717_v04_r02-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mvn_swe_l2_arc3d_20180717_v04_r02-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_swe_l2_arc3d_20180717_v04_r02-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_swe_l2_svyspec_20180718_v04_r04.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_swe_l2_svyspec_20180718_v04_r04-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_swe_l2_svyspec_20180718_v04_r04-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_swe_l2_svyspec_20180718_v04_r04-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mvn_swe_l2_svyspec_20180718_v04_r04.nc")
-cdflib.xarray_to_cdf(c, 'mvn_swe_l2_svyspec_20180718_v04_r04-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mvn_swe_l2_svyspec_20180718_v04_r04-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_swe_l2_svyspec_20180718_v04_r04-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_swi_l2_finearc3d_20180720_v01_r01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_swi_l2_finearc3d_20180720_v01_r01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_swi_l2_finearc3d_20180720_v01_r01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_swi_l2_finearc3d_20180720_v01_r01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mvn_swi_l2_finearc3d_20180720_v01_r01.nc")
-cdflib.xarray_to_cdf(c, 'mvn_swi_l2_finearc3d_20180720_v01_r01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mvn_swi_l2_finearc3d_20180720_v01_r01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_swi_l2_finearc3d_20180720_v01_r01-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/mvn_swi_l2_onboardsvyspec_20180720_v01_r01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'mvn_swi_l2_onboardsvyspec_20180720_v01_r01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('mvn_swi_l2_onboardsvyspec_20180720_v01_r01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_swi_l2_onboardsvyspec_20180720_v01_r01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/mvn_swi_l2_onboardsvyspec_20180720_v01_r01.nc")
-cdflib.xarray_to_cdf(c, 'mvn_swi_l2_onboardsvyspec_20180720_v01_r01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('mvn_swi_l2_onboardsvyspec_20180720_v01_r01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('mvn_swi_l2_onboardsvyspec_20180720_v01_r01-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/omni_hro2_1min_20151001_v01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'omni_hro2_1min_20151001_v01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('omni_hro2_1min_20151001_v01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('omni_hro2_1min_20151001_v01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/omni_hro2_1min_20151001_v01.nc")
-cdflib.xarray_to_cdf(c, 'omni_hro2_1min_20151001_v01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('omni_hro2_1min_20151001_v01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('omni_hro2_1min_20151001_v01-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-c = xr.load_dataset("C:/Work/cdf_test_files/raids_nirs_20100823_v1.1.nc")
-cdflib.xarray_to_cdf(c, 'raids_nirs_20100823_v1.1-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('raids_nirs_20100823_v1.1-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('raids_nirs_20100823_v1.1-created-from-netcdf-input.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/rbsp-a_magnetometer_1sec-gsm_emfisis-l3_20190122_v1.6.2.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'rbsp-a_magnetometer_1sec-gsm_emfisis-l3_20190122_v1.6.2-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('rbsp-a_magnetometer_1sec-gsm_emfisis-l3_20190122_v1.6.2-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('rbsp-a_magnetometer_1sec-gsm_emfisis-l3_20190122_v1.6.2-created-from-cdf-input.cdf')
-
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-c = xr.load_dataset("C:/Work/cdf_test_files/see__L3_2021009_012_01.ncdf")
-cdflib.xarray_to_cdf(c, 'see__L3_2021009_012_01.ncdfhello2.cdf')
-d = cdflib.cdf_to_xarray('see__L3_2021009_012_01.ncdfhello2.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('see__L3_2021009_012_01.ncdfhello2.cdf')
-#
-
-#
-import cdflib
-import xarray as xr
-import numpy as np
-from cdflib.epochs import CDFepoch as cdfepoch
-import os
-
-c = xr.load_dataset("C:/Work/cdf_test_files/see__xps_L2A_2021006_012_02.ncdf")
-cdflib.xarray_to_cdf(c, 'see__xps_L2A_2021006_012_02.ncdfhello2.cdf')
-d = cdflib.cdf_to_xarray('see__xps_L2A_2021006_012_02.ncdfhello2.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('see__xps_L2A_2021006_012_02.ncdfhello2.cdf')
-
-# Round-trip test: ARM SGP radiosonde data (sgpsondewnpnC1); NetCDF input only
-import cdflib
-import xarray as xr
-import os
-
-c = xr.load_dataset("C:/Work/cdf_test_files/sgpsondewnpnC1.nc")
-cdflib.xarray_to_cdf(c, 'sgpsondewnpnC1-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('sgpsondewnpnC1-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('sgpsondewnpnC1-created-from-netcdf-input.cdf')
-
-
-# Round-trip test: THEMIS-C L2 SST data (thc_l2_sst); CDF -> xarray -> CDF, then the same from the NetCDF copy
-import cdflib
-import xarray as xr
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/thc_l2_sst_20210709_v01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'thc_l2_sst_20210709_v01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('thc_l2_sst_20210709_v01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('thc_l2_sst_20210709_v01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/thc_l2_sst_20210709_v01.nc")
-cdflib.xarray_to_cdf(c, 'thc_l2_sst_20210709_v01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('thc_l2_sst_20210709_v01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('thc_l2_sst_20210709_v01-created-from-netcdf-input.cdf')
-
-
-# Round-trip test: THEMIS ground magnetometer data (thg_l2_mag_amd); CDF -> xarray -> CDF, then the same from the NetCDF copy
-import cdflib
-import xarray as xr
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/thg_l2_mag_amd_20070323_v01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'thg_l2_mag_amd_20070323_v01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('thg_l2_mag_amd_20070323_v01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('thg_l2_mag_amd_20070323_v01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/thg_l2_mag_amd_20070323_v01.nc")
-cdflib.xarray_to_cdf(c, 'thg_l2_mag_amd_20070323_v01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('thg_l2_mag_amd_20070323_v01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('thg_l2_mag_amd_20070323_v01-created-from-netcdf-input.cdf')
-
-# Round-trip test: Wind 3DP electron spectra (wi_elsp_3dp); CDF -> xarray -> CDF, then the same from the NetCDF copy
-import cdflib
-import xarray as xr
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/wi_elsp_3dp_20210115_v01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'wi_elsp_3dp_20210115_v01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('wi_elsp_3dp_20210115_v01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('wi_elsp_3dp_20210115_v01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/wi_elsp_3dp_20210115_v01.nc")
-cdflib.xarray_to_cdf(c, 'wi_elsp_3dp_20210115_v01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('wi_elsp_3dp_20210115_v01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('wi_elsp_3dp_20210115_v01-created-from-netcdf-input.cdf')
-
-# Round-trip test: Wind K0 SPHA data (wi_k0_spha); CDF -> xarray -> CDF, then the same from the NetCDF copy
-import cdflib
-import xarray as xr
-import os
-
-a = cdflib.cdf_to_xarray("C:/Work/cdf_test_files/wi_k0_spha_20210121_v01.cdf", to_unixtime=True, fillval_to_nan=True)
-cdflib.xarray_to_cdf(a, 'wi_k0_spha_20210121_v01-created-from-cdf-input.cdf', from_unixtime=True)
-b = cdflib.cdf_to_xarray('wi_k0_spha_20210121_v01-created-from-cdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('wi_k0_spha_20210121_v01-created-from-cdf-input.cdf')
-
-c = xr.load_dataset("C:/Work/cdf_test_files/wi_elsp_3dp_20210115_v01.nc")
-cdflib.xarray_to_cdf(c, 'wi_k0_spha_20210121_v01-created-from-netcdf-input.cdf')
-d = cdflib.cdf_to_xarray('wi_k0_spha_20210121_v01-created-from-netcdf-input.cdf', to_unixtime=True, fillval_to_nan=True)
-os.remove('wi_k0_spha_20210121_v01-created-from-netcdf-input.cdf')
-
-
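-# Build a small Dataset from scratch and check that xarray_to_cdf accepts it:
-# first bare, then with ISTP-style global attributes, then with a support
-# variable for the 'direction' dimension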
-import cdflib
-import xarray as xr
-import os
-var_data = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
-var_dims = ['epoch', 'direction']
-data = xr.Variable(var_dims, var_data)
-epoch_data = [1, 2, 3]
-epoch_dims = ['epoch']
-epoch = xr.Variable(epoch_dims, epoch_data)
-ds = xr.Dataset(data_vars={'data': data, 'epoch': epoch})
-cdflib.xarray_to_cdf(ds, 'hello.cdf')
-os.remove('hello.cdf')
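-
-# Same two variables again, now with a set of ISTP-style global attributes
-# attached to the Dataset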
-global_attributes = {'Project': 'Hail Mary',
- 'Source_name': 'Thin Air',
- 'Discipline': 'None',
- 'Data_type': 'counts',
- 'Descriptor': 'Midichlorians in unicorn blood',
- 'Data_version': '3.14',
- 'Logical_file_id': 'SEVENTEEN',
- 'PI_name': 'Darth Vader',
- 'PI_affiliation': 'Dark Side',
- 'TEXT': 'AHHHHH',
- 'Instrument_type': 'Banjo',
- 'Mission_group': 'Impossible',
- 'Logical_source': ':)',
- 'Logical_source_description': ':('}
-data = xr.Variable(var_dims, var_data)
-epoch = xr.Variable(epoch_dims, epoch_data)
-ds = xr.Dataset(data_vars={'data': data, 'epoch': epoch}, attrs=global_attributes)
-cdflib.xarray_to_cdf(ds, 'hello.cdf')
-os.remove('hello.cdf')
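-
-# Add a 'direction' support variable so the second dimension of 'data' has a
-# matching coordinate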
-dir_data = [1, 2, 3]
-dir_dims = ['direction']
-direction = xr.Variable(dir_dims, dir_data)
-ds = xr.Dataset(data_vars={'data': data, 'epoch': epoch, 'direction':direction}, attrs=global_attributes)
-cdflib.xarray_to_cdf(ds, 'hello.cdf')
-os.remove('hello.cdf')
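-
-# A minimal sketch of an equality check that could follow any of the round-trip
-# blocks above; 'check_roundtrip' is a hypothetical helper, not part of cdflib.
-# It assumes both Datasets were read with fillval_to_nan=True and leans on
-# xarray's assert_allclose, which treats NaNs in matching positions as equal.
-from xarray.testing import assert_allclose
-
-def check_roundtrip(before, after):
-    # Compare variable by variable so a single mismatch is easy to spot
-    for name in before.data_vars:
-        assert_allclose(before[name], after[name])
-
-# e.g., after one of the CDF-input blocks: check_roundtrip(a, b)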
-'''
\ No newline at end of file