From 16b84e8e7933b98c63eaa4fbeb47f6e28a45d611 Mon Sep 17 00:00:00 2001
From: Mike Taves
Date: Sat, 22 Jan 2022 02:07:14 +1300
Subject: [PATCH] style(import): consistently use relative imports (#1330)

* Convert line endings to UNIX; remove BOM

* Use relative imports
---
 flopy/discretization/unstructuredgrid.py |    4 +-
 flopy/discretization/vertexgrid.py       |    2 +-
 flopy/export/shapefile_utils.py          |    2 +-
 flopy/export/vtk.py                      | 3886 ++++++-------
 flopy/mbase.py                           |    4 +-
 flopy/mf6/data/mfdata.py                 |    2 +-
 flopy/mf6/data/mfdataarray.py            |    4 +-
 flopy/mf6/data/mfdatalist.py             |    4 +-
 flopy/mf6/data/mfdatascalar.py           |    4 +-
 flopy/mf6/mfmodel.py                     |    4 +-
 flopy/mf6/mfpackage.py                   |    4 +-
 flopy/mf6/modflow/mfsimulation.py        |    2 +-
 flopy/mf6/utils/createpackages.py        |    2 +
 flopy/modflow/mf.py                      |    2 +-
 flopy/modflow/mfsfr2.py                  |   18 +-
 flopy/modflow/mfuzf1.py                  |    2 +-
 flopy/mt3d/mt.py                         |    2 +-
 flopy/pakbase.py                         |    6 +-
 flopy/seawat/swt.py                      |    2 +-
 flopy/utils/datafile.py                  |    2 +-
 flopy/utils/util_array.py                |   12 +-
 flopy/utils/util_list.py                 |    4 +-
 flopy/utils/zonbud.py                    | 6322 +++++++++++-----------
 23 files changed, 5149 insertions(+), 5147 deletions(-)

diff --git a/flopy/discretization/unstructuredgrid.py b/flopy/discretization/unstructuredgrid.py
index 6d0adb3f31..782c35b7f5 100644
--- a/flopy/discretization/unstructuredgrid.py
+++ b/flopy/discretization/unstructuredgrid.py
@@ -616,7 +616,7 @@ def plot(self, **kwargs):
         lc : matplotlib.collections.LineCollection
 
         """
-        from flopy.plot import PlotMapView
+        from ..plot import PlotMapView
 
         layer = 0
         if "layer" in kwargs:
@@ -837,7 +837,7 @@ def ncpl_from_ihc(ihc, iac):
             number of cells per plottable layer
 
         """
-        from flopy.utils.gridgen import get_ia_from_iac
+        from ..utils.gridgen import get_ia_from_iac
 
         valid = False
         ia = get_ia_from_iac(iac)
diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py
index 558dc10dc9..54ad1bf775 100644
--- a/flopy/discretization/vertexgrid.py
+++ b/flopy/discretization/vertexgrid.py
@@ -361,7 +361,7 @@ def plot(self, **kwargs):
         lc : matplotlib.collections.LineCollection
 
         """
-        from flopy.plot import PlotMapView
+        from ..plot import PlotMapView
 
         mm = PlotMapView(modelgrid=self)
         return mm.plot_grid(**kwargs)
diff --git a/flopy/export/shapefile_utils.py b/flopy/export/shapefile_utils.py
index 9ded65c505..cfce7dbac8 100755
--- a/flopy/export/shapefile_utils.py
+++ b/flopy/export/shapefile_utils.py
@@ -896,7 +896,7 @@ def get_spatialreference(epsg, text="esriwkt"):
     url : str
 
     """
-    from flopy.utils.flopy_io import get_url_text
+    from ..utils.flopy_io import get_url_text
 
     epsg_categories = (
         "epsg",
diff --git a/flopy/export/vtk.py b/flopy/export/vtk.py
index 16696cab75..93ff7a0919 100644
--- a/flopy/export/vtk.py
+++ b/flopy/export/vtk.py
@@ -1,1943 +1,1943 @@
-"""
-The vtk module provides functionality for exporting model inputs and
-outputs to VTK.
-""" - -import os -import warnings -import numpy as np -from flopy.datbase import DataType, DataInterface -from flopy.utils import Util3d - -from ..utils import import_optional_dependency - -warnings.simplefilter("always", DeprecationWarning) - - -VTKIGNORE = ("vkcb", "perlen", "steady", "tsmult", "nstp") - - -class Pvd: - """ - Simple class to build a Paraview Data File (PVD) - - """ - - def __init__(self): - self.__data = [ - '\n', - '\n', - "\n", - ] - - def add_timevalue(self, file, timevalue): - """ - Method to add a Dataset record to the pvd file - - Parameters - ---------- - file : str - vtu file name - timevalue : float - time step value in model time - """ - if not file.endswith(".vtu"): - raise AssertionError("File name must end with .vtu ") - - file = os.path.split(file)[-1] - - record = ( - f'\n' - ) - self.__data.append(record) - - def write(self, f): - """ - Method to write a pvd file from the PVD object - - Paramaters - ---------- - f : str - PVD file name - - """ - if not f.endswith(".pvd"): - f += ".pvd" - - with open(f, "w") as foo: - foo.writelines(self.__data) - foo.write("\n") - foo.write("") - - -class Vtk: - """ - Class that builds VTK objects and exports models to VTK files - - - Parameters: - ---------- - model : flopy.ModelInterface object - any flopy model object, example flopy.modflow.Modflow() object - modelgrid : flopy.discretization.Grid object - any flopy modelgrid object, example. VertexGrid - vertical_exageration : float - floating point value to scale vertical exageration of the vtk points - default is 1. - binary : bool - flag that indicates if Vtk will write a binary or text file. Binary - is prefered as paraview has a bug (8/4/2021) where is cannot represent - NaN values from ASCII (non xml) files. In this case no-data values - are set to 1e+30. - xml : bool - flag to write xml based VTK files - pvd : bool - boolean flag to write a paraview pvd file for transient data. This - file maps vtk files to a model time. - shared_points : bool - boolean flag to share points in grid polyhedron construction. Default - is False, as paraview has a bug (8/4/2021) where some polyhedrons will - not properly close when using shared points. If shared_points is True - file size will be slightly smaller. - smooth : bool - boolean flag to interpolate vertex elevations based on shared cell - elevations. Default is False. - point_scalars : bool - boolen flag to write interpolated data at each point based "shared - vertices". 
- - """ - - def __init__( - self, - model=None, - modelgrid=None, - vertical_exageration=1, - binary=True, - xml=False, - pvd=False, - shared_points=False, - smooth=False, - point_scalars=False, - ): - - vtk = import_optional_dependency("vtk") - - if model is None and modelgrid is None: - raise AssertionError( - "A model or modelgrid must be provided to use Vtk" - ) - - elif model is not None: - self.modelgrid = model.modelgrid - self.modeltime = model.modeltime - else: - self.modelgrid = modelgrid - self.modeltime = None - - self.binary = binary - self.xml = xml - self.pvd = pvd - - if self.pvd and not self.xml: - print( - "Switching to xml, ASCII and standard binary are not " - "supported by Paraview's PVD reader" - ) - self.xml = True - - self.vertical_exageration = vertical_exageration - self.shared_points = shared_points - self.smooth = smooth - self.point_scalars = point_scalars - - self.verts = self.modelgrid.verts - self.iverts = self.modelgrid.iverts - self.nnodes = self.modelgrid.nnodes - - nvpl = 0 - for iv in self.iverts: - nvpl += len(iv) - 1 - - self.nvpl = nvpl - - # method to accomodate DISU grids, do not use modelgrid.ncpl! - self.ncpl = len(self.iverts) - if self.nnodes == len(self.iverts): - self.nlay = 1 - else: - self.nlay = self.modelgrid.nlay - - self._laycbd = self.modelgrid.laycbd - if self._laycbd is None: - self._laycbd = np.zeros((self.nlay,), dtype=int) - - self._active = [] - self._ncbd = 0 - for i in range(self.nlay): - self._active.append(1) - if self._laycbd[i] != 0: - self._active.append(0) - self._ncbd += 1 - - # USG trap - if self.modelgrid.top.size == self.nnodes: - self.top = self.modelgrid.top.reshape(self.nnodes) - else: - self.top = self.modelgrid.top.reshape(self.ncpl) - self.botm = self.modelgrid.botm.reshape(-1, self.ncpl) - - if self.modelgrid.idomain is not None: - self.idomain = self.modelgrid.idomain.reshape(self.nnodes) - else: - self.idomain = np.ones((self.nnodes,), dtype=int) - - if self.modeltime is not None: - perlen = self.modeltime.perlen - self._totim = np.add.accumulate(perlen) - - self.points = [] - self.faces = [] - self.vtk_grid = None - self.vtk_polygons = None - self.vtk_pathlines = None - self._pathline_points = [] - self._point_scalar_graph = None - self._point_scalar_numpy_graph = None - self._idw_weight_graph = None - self._idw_total_weight_graph = None - - self._vtk_geometry_set = False - self.__transient_output_data = False - self.__transient_data = {} - self.__transient_vector = {} - self.__pathline_transient_data = {} - self.__vtk = vtk - - if self.point_scalars: - self._create_point_scalar_graph() - - def _create_smoothed_elevation_graph(self, adjk, top_elev=True): - """ - Method to create a dictionary of shared point elevations - - Parameters - ---------- - adjk : int - confining bed adjusted layer - - Returns - ------- - dict : {vertex number: elevation} - - """ - elevations = {} - for i, iv in enumerate(self.iverts): - iv = iv[1:] - for v in iv: - if v is None: - continue - if not top_elev: - zv = self.botm[adjk][i] * self.vertical_exageration - elif adjk == 0: - zv = self.top[i] * self.vertical_exageration - else: - if self.top.size == self.nnodes: - adji = (adjk * self.ncpl) + i - zv = self.top[adji] * self.vertical_exageration - else: - zv = self.botm[adjk - 1][i] * self.vertical_exageration - - if v in elevations: - elevations[v].append(zv) - else: - elevations[v] = [zv] - - for key in elevations: - elevations[key] = np.mean(elevations[key]) - - return elevations - - def _create_point_scalar_graph(self): - """ - 
Method to create a point scalar graph to map cells to points - - """ - graph = {} - v0 = 0 - v1 = 0 - nvert = len(self.verts) - shared_points = self.shared_points - if len(self._active) != self.nlay: - shared_points = False - - for k in range(self.nlay): - for i, iv in enumerate(self.iverts): - iv = iv[1:] - adji = (k * self.ncpl) + i - for v in iv: - if v is None: - continue - xvert = self.verts[v, 0] - yvert = self.verts[v, 1] - adjv = v + v1 - if adjv not in graph: - graph[adjv] = { - "vtk_points": [v0], - "idx": [adji], - "xv": [xvert], - "yv": [yvert], - } - else: - graph[adjv]["vtk_points"].append(v0) - graph[adjv]["idx"].append(adji) - graph[adjv]["xv"].append(xvert) - graph[adjv]["yv"].append(yvert) - v0 += 1 - - v1 += nvert - - if k == self.nlay - 1 or not shared_points: - for i, iv in enumerate(self.iverts): - iv = iv[1:] - adji = (k * self.ncpl) + i - for v in iv: - if v is None: - continue - xvert = self.verts[v, 0] - yvert = self.verts[v, 1] - adjv = v + v1 - if adjv not in graph: - graph[adjv] = { - "vtk_points": [v0], - "idx": [adji], - "xv": [xvert], - "yv": [yvert], - } - else: - graph[adjv]["vtk_points"].append(v0) - graph[adjv]["idx"].append(adji) - graph[adjv]["xv"].append(xvert) - graph[adjv]["yv"].append(yvert) - v0 += 1 - v1 += nvert - - # convert this to a numpy representation - max_shared = 0 - num_points = v0 - for k, d in graph.items(): - if len(d["vtk_points"]) > max_shared: - max_shared = len(d["vtk_points"]) - - numpy_graph = np.ones((max_shared, num_points), dtype=int) * -1 - xvert = np.ones((max_shared, num_points)) * np.nan - yvert = np.ones((max_shared, num_points)) * np.nan - for k, d in graph.items(): - for _, pt in enumerate(d["vtk_points"]): - for ixx, value in enumerate(d["idx"]): - numpy_graph[ixx, pt] = value - xvert[ixx, pt] = d["xv"][ixx] - yvert[ixx, pt] = d["yv"][ixx] - - # now create the IDW weights for point scalars - xc = np.ravel(self.modelgrid.xcellcenters) - yc = np.ravel(self.modelgrid.ycellcenters) - - if xc.size != self.nnodes: - xc = np.tile(xc, self.nlay) - yc = np.tile(yc, self.nlay) - - xc = np.where(numpy_graph != -1, xc[numpy_graph], np.nan) - yc = np.where(numpy_graph != -1, yc[numpy_graph], np.nan) - - asq = (xvert - xc) ** 2 - bsq = (yvert - yc) ** 2 - - weights = np.sqrt(asq + bsq) - tot_weights = np.nansum(weights, axis=0) - - self._point_scalar_graph = graph - self._point_scalar_numpy_graph = numpy_graph - self._idw_weight_graph = weights - self._idw_total_weight_graph = tot_weights - - def _build_grid_geometry(self): - """ - Method that creates lists of vertex points and cell faces - - """ - points = [] - faces = [] - v0 = 0 - v1 = 0 - ncb = 0 - shared_points = self.shared_points - if len(self._active) != self.nlay: - shared_points = False - - for k in range(self.nlay): - adjk = k + ncb - if k != self.nlay - 1: - if self._active[adjk + 1] == 0: - ncb += 1 - - if self.smooth: - elevations = self._create_smoothed_elevation_graph(adjk) - - for i, iv in enumerate(self.iverts): - iv = iv[1:] - for v in iv: - if v is None: - continue - xv = self.verts[v, 0] - yv = self.verts[v, 1] - if self.smooth: - zv = elevations[v] - elif k == 0: - zv = self.top[i] * self.vertical_exageration - else: - if self.top.size == self.nnodes: - adji = (adjk * self.ncpl) + i - zv = self.top[adji] * self.vertical_exageration - else: - zv = ( - self.botm[adjk - 1][i] - * self.vertical_exageration - ) - - points.append([xv, yv, zv]) - v1 += 1 - - cell_faces = [ - [v for v in range(v0, v1)], - [v + self.nvpl for v in range(v0, v1)], - ] - - for v in 
range(v0, v1): - if v != v1 - 1: - cell_faces.append( - [v + 1, v, v + self.nvpl, v + self.nvpl + 1] - ) - else: - cell_faces.append( - [v0, v, v + self.nvpl, v0 + self.nvpl] - ) - - v0 = v1 - faces.append(cell_faces) - - if k == self.nlay - 1 or not shared_points: - if self.smooth: - elevations = self._create_smoothed_elevation_graph( - adjk, top_elev=False - ) - - for i, iv in enumerate(self.iverts): - iv = iv[1:] - for v in iv: - if v is None: - continue - xv = self.verts[v, 0] - yv = self.verts[v, 1] - if self.smooth: - zv = elevations[v] - else: - zv = self.botm[adjk][i] * self.vertical_exageration - - points.append([xv, yv, zv]) - v1 += 1 - - v0 = v1 - - self.points = points - self.faces = faces - - def _set_vtk_grid_geometry(self): - """ - Method to set vtk's geometry and add it to the vtk grid object - - """ - if self._vtk_geometry_set: - return - - if not self.faces: - self._build_grid_geometry() - - self.vtk_grid = self.__vtk.vtkUnstructuredGrid() - - points = self.__vtk.vtkPoints() - for point in self.points: - points.InsertNextPoint(point) - - self.vtk_grid.SetPoints(points) - - for node in range(self.nnodes): - cell_faces = self.faces[node] - nface = len(cell_faces) - fid_list = self.__vtk.vtkIdList() - fid_list.InsertNextId(nface) - for face in cell_faces: - fid_list.InsertNextId(len(face)) - [fid_list.InsertNextId(i) for i in face] - - self.vtk_grid.InsertNextCell(self.__vtk.VTK_POLYHEDRON, fid_list) - - self._vtk_geometry_set = True - - def _build_hfbs(self, pkg): - """ - Method to add hfb planes to the vtk object - - Parameters - ---------- - pkg : object - flopy hfb object - - """ - from vtk.util import numpy_support - - # check if modflow 6 or modflow 2005 - if hasattr(pkg, "hfb_data"): - mf6 = False - hfb_data = pkg.hfb_data - else: - # asssume that there is no transient hfb data for now - hfb_data = pkg.stress_period_data.array[0] - mf6 = True - - points = [] - faces = [] - array = [] - cnt = 0 - verts = self.modelgrid.cross_section_vertices - verts = np.dstack((verts[0], verts[1])) - botm = np.ravel(self.botm) - for hfb in hfb_data: - if self.modelgrid.grid_type == "structured": - if not mf6: - k = hfb["k"] - cell1 = list(hfb[["k", "irow1", "icol1"]]) - cell2 = list(hfb[["k", "irow2", "icol2"]]) - else: - cell1 = hfb["cellid1"] - cell2 = hfb["cellid2"] - k = cell1[0] - n0, n1 = self.modelgrid.get_node([cell1, cell2]) - - else: - cell1 = hfb["cellid1"] - cell2 = hfb["cellid2"] - if len(cell1) == 2: - k, n0 = cell1 - _, n1 = cell2 - else: - n0 = cell1[0] - n1 = cell1[1] - k = 0 - - array.append(hfb["hydchr"]) - adjn0 = n0 - (k * self.ncpl) - adjn1 = n1 - (k * self.ncpl) - v1 = verts[adjn0] - v2 = verts[adjn1] - - # get top and botm elevations, use max and min: - if k == 0 or self.top.size == self.nnodes: - tv = np.max([self.top[n0], self.top[n1]]) - bv = np.min([botm[n0], botm[n1]]) - else: - tv = np.max([botm[n0 - self.ncpl], botm[n1 - self.ncpl]]) - bv = np.min([botm[n0], botm[n1]]) - - tv *= self.vertical_exageration - bv *= self.vertical_exageration - - pts = [] - for v in v1: - # ix = np.where(v2 == v) - ix = np.where((v2.T[0] == v[0]) & (v2.T[1] == v[1])) - if len(ix[0]) > 0 and len(pts) < 2: - pts.append(v2[ix[0][0]]) - - pts = np.sort(pts)[::-1] - - # create plane, counter-clockwise order - pts = [ - (pts[0, 0], pts[0, 1], tv), - (pts[1, 0], pts[1, 1], tv), - (pts[1, 0], pts[1, 1], bv), - (pts[0, 0], pts[0, 1], bv), - ] - - # add to points and faces - plane_face = [] - for pt in pts: - points.append(pt) - plane_face.append(cnt) - cnt += 1 - - 
faces.append(plane_face) - - # now create the vtk geometry - vtk_points = self.__vtk.vtkPoints() - for point in points: - vtk_points.InsertNextPoint(point) - - # now create an UnstructuredGrid object - polydata = self.__vtk.vtkUnstructuredGrid() - polydata.SetPoints(vtk_points) - - # create the vtk polygons - for face in faces: - polygon = self.__vtk.vtkPolygon() - polygon.GetPointIds().SetNumberOfIds(4) - for ix, iv in enumerate(face): - polygon.GetPointIds().SetId(ix, iv) - polydata.InsertNextCell( - polygon.GetCellType(), polygon.GetPointIds() - ) - - # and then set the hydchr data - vtk_arr = numpy_support.numpy_to_vtk( - num_array=np.array(array), array_type=self.__vtk.VTK_FLOAT - ) - vtk_arr.SetName("hydchr") - polydata.GetCellData().SetScalars(vtk_arr) - self.vtk_polygons = polydata - - def _build_point_scalar_array(self, array): - """ - Method to build a point scalar array using an inverse distance - weighting scheme - - Parameters - ---------- - array : np.ndarray - ndarray of shape nnodes - - Returns - ------- - np.ndarray of shape n vtk points - """ - isint = False - if np.issubdtype(array[0], np.dtype(int)): - isint = True - - if isint: - # maybe think of a way to do ints with the numpy graph, - # looping through the dict is very inefficient - ps_array = np.zeros((len(self.points),), dtype=array.dtype) - for _, value in self._point_scalar_graph.items(): - for ix, pt in enumerate(value["vtk_points"]): - ps_array[pt] = array[value["idx"][ix]] - else: - ps_graph = self._point_scalar_numpy_graph - ps_array = np.where(ps_graph >= 0, array[ps_graph], np.nan) - - # do inverse distance weighting and apply mask to retain - # nan valued cells because numpy returns 0 when all vals are nan - weighted_vals = self._idw_weight_graph * ps_array - mask = np.isnan(weighted_vals).all(axis=0) - weighted_vals = np.nansum(weighted_vals, axis=0) - weighted_vals[mask] = np.nan - ps_array = weighted_vals / self._idw_total_weight_graph - - return ps_array - - def _add_timevalue(self, index, fname): - """ - Method to add a TimeValue to a vtk object, used with transient arrays - - Parameters - ---------- - index : int, tuple - integer representing kper or a tuple of (kstp, kper) - fname : str - path to the vtu file - - """ - if not self.pvd: - return - - try: - timeval = self._totim[index] - except (IndexError, KeyError): - return - - self.pvd.add_timevalue(fname, timeval) - - def _mask_values(self, array, masked_values): - """ - Method to mask values with nan - - Parameters - ---------- - array : np.ndarray - numpy array of values - masked_values : list - values to convert to nan - - Returns - ------- - numpy array - """ - if masked_values is not None: - try: - for mv in masked_values: - array[array == mv] = np.nan - except ValueError: - pass - - return array - - def add_array(self, array, name, masked_values=None, dtype=None): - """ - Method to set an array to the vtk grid - - Parameters - ---------- - array : np.ndarray, list - list of array values to set to the vtk_array - name : str - array name for vtk - masked_values : list, None - list of values to set equal to nan - dtype : vtk datatype object - method to supply and force a vtk datatype - - """ - from vtk.util import numpy_support - - if not self._vtk_geometry_set: - self._set_vtk_grid_geometry() - - array = np.ravel(array) - - if array.size != self.nnodes: - raise AssertionError("array must be the size as the modelgrid") - - if self.idomain is not None: - try: - array[self.idomain == 0] = np.nan - except ValueError: - pass - - array = 
self._mask_values(array, masked_values) - - if not self.binary and not self.xml: - # ascii does not properly render nan values - array = np.nan_to_num(array, nan=1e30) - - if self.point_scalars: - array = self._build_point_scalar_array(array) - - if dtype is None: - dtype = self.__vtk.VTK_FLOAT - if np.issubdtype(array[0], np.dtype(int)): - dtype = self.__vtk.VTK_INT - - vtk_arr = numpy_support.numpy_to_vtk(num_array=array, array_type=dtype) - vtk_arr.SetName(name) - - if self.point_scalars: - self.vtk_grid.GetPointData().AddArray(vtk_arr) - else: - self.vtk_grid.GetCellData().AddArray(vtk_arr) - - def add_transient_array(self, d, name=None, masked_values=None): - """ - Method to add transient array data to the vtk object - - Parameters - ---------- - d: dict - dictionary of array2d, arry3d data or numpy array data - name : str, None - parameter name, required when user provides a dictionary - of numpy arrays - masked_values : list, None - list of values to set equal to nan - - Returns - ------- - - """ - if self.__transient_output_data: - raise AssertionError( - "Transient arrays cannot be mixed with transient output, " - "Please create a seperate vtk object for transient package " - "data" - ) - - if not self._vtk_geometry_set: - self._set_vtk_grid_geometry() - - k = list(d.keys())[0] - transient = dict() - if isinstance(d[k], DataInterface): - if d[k].data_type in (DataType.array2d, DataType.array3d): - if name is None: - name = d[k].name - if isinstance(name, list): - name = name[0] - - for kper, value in d.items(): - if value.array.size != self.nnodes: - array = np.zeros(self.nnodes) * np.nan - array[: value.array.size] = np.ravel(value.array) - else: - array = value.array - - array = self._mask_values(array, masked_values) - transient[kper] = array - else: - if name is None: - raise ValueError( - "name must be specified when providing numpy arrays" - ) - for kper, trarray in d.items(): - if trarray.size != self.nnodes: - array = np.zeros(self.nnodes) * np.nan - array[: trarray.size] = np.ravel(trarray) - else: - array = trarray - - array = self._mask_values(array, masked_values) - transient[kper] = array - - for k, v in transient.items(): - if k not in self.__transient_data: - self.__transient_data[k] = {name: v} - else: - self.__transient_data[k][name] = v - - def add_transient_list(self, mflist, masked_values=None): - """ - Method to add transient list data to a vtk object - - Parameters - ---------- - mflist : flopy.utils.MfList object - masked_values : list, None - list of values to set equal to nan - - """ - if not self._vtk_geometry_set: - self._set_vtk_grid_geometry() - - pkg_name = mflist.package.name[0] - mfl = mflist.array - if isinstance(mfl, dict): - for arr_name, arr4d in mflist.array.items(): - d = {kper: array for kper, array in enumerate(arr4d)} - name = f"{pkg_name}_{arr_name}" - self.add_transient_array(d, name) - else: - export = {} - for kper in range(mflist.package.parent.nper): - try: - arr_dict = mflist.to_array(kper, mask=True) - except ValueError: - continue - - if arr_dict is None: - continue - - for arr_name, array in arr_dict.items(): - if arr_name not in export: - export[arr_name] = {kper: array} - else: - export[arr_name][kper] = array - - for arr_name, d in export.items(): - name = f"{pkg_name}_{arr_name}" - self.add_transient_array(d, name, masked_values=masked_values) - - def add_vector(self, vector, name, masked_values=None): - """ - Method to add vector data to vtk - - Parameters - ---------- - vector : array - array of dimension (3, nnodes) - name 
: str - name of the vector to be displayed in vtk - masked_values : list, None - list of values to set equal to nan - - """ - from vtk.util import numpy_support - - if not self._vtk_geometry_set: - self._set_vtk_grid_geometry() - - if isinstance(vector, (tuple, list)): - vector = np.array(vector) - - if vector.size != 3 * self.nnodes: - if vector.size == 3 * self.ncpl: - vector = np.reshape(vector, (3, self.ncpl)) - tv = np.full((3, self.nnodes), np.nan) - for ix, q in enumerate(vector): - tv[ix, : self.ncpl] = q - vector = tv - else: - raise AssertionError( - "Size of vector must be 3 * nnodes or 3 * ncpl" - ) - else: - vector = np.reshape(vector, (3, self.nnodes)) - - if self.point_scalars: - tmp = [] - for v in vector: - tmp.append(self._build_point_scalar_array(v)) - vector = np.array(tmp) - - vector = self._mask_values(vector, masked_values) - - vtk_arr = numpy_support.numpy_to_vtk( - num_array=vector, array_type=self.__vtk.VTK_FLOAT - ) - vtk_arr.SetName(name) - vtk_arr.SetNumberOfComponents(3) - - if self.point_scalars: - self.vtk_grid.GetPointData().SetVectors(vtk_arr) - else: - self.vtk_grid.GetCellData().SetVectors(vtk_arr) - - def add_transient_vector(self, d, name, masked_values=None): - """ - Method to add transient vector data to vtk - - Paramters - --------- - d : dict - dictionary of vectors - name : str - name of vector to be displayed in vtk - masked_values : list, None - list of values to set equal to nan - - """ - if not self._vtk_geometry_set: - self._set_vtk_grid_geometry() - - if self.__transient_data: - k = list(self.__transient_data.keys())[0] - if len(d) != len(self.__transient_data[k]): - print( - "Transient vector not same size as transient arrays time " - "stamp may be unreliable for vector data in VTK file" - ) - - if isinstance(d, dict): - cnt = 0 - for key, value in d.items(): - if not isinstance(value, np.ndarray): - value = np.array(value) - - if ( - value.size != 3 * self.ncpl - or value.size != 3 * self.nnodes - ): - raise AssertionError( - "Size of vector must be 3 * nnodes or 3 * ncpl" - ) - - value = self._mask_values(value, masked_values) - self.__transient_vector[cnt] = {name: value} - cnt += 1 - - def add_package(self, pkg, masked_values=None): - """ - Method to set all arrays within a package to VTK arrays - - Parameters - ---------- - pkg : flopy.pakbase.Package object - flopy package object, example ModflowWel - masked_values : list, None - list of values to set equal to nan - - """ - if not self._vtk_geometry_set: - self._set_vtk_grid_geometry() - - if "hfb" in pkg.name[0].lower(): - self._build_hfbs(pkg) - return - - for item, value in pkg.__dict__.items(): - if item in VTKIGNORE: - continue - - if isinstance(value, list): - for v in value: - if isinstance(v, Util3d): - if value.array.size != self.nnodes: - continue - self.add_array(v.array, item, masked_values) - - if isinstance(value, DataInterface): - if value.data_type in (DataType.array2d, DataType.array3d): - if value.array is not None: - if value.array.size < self.nnodes: - if value.array.size < self.ncpl: - continue - - array = np.zeros(self.nnodes) * np.nan - array[: value.array.size] = np.ravel(value.array) - - elif value.array.size > self.nnodes and self._ncbd > 0: - # deal with confining beds - array = np.array( - [ - value.array[ix] - for ix, i in enumerate(self._active) - if i != 0 - ] - ) - - else: - array = value.array - - self.add_array(array, item, masked_values) - - elif value.data_type == DataType.transient2d: - if value.array is not None: - if hasattr(value, 
"transient_2ds"): - self.add_transient_array( - value.transient_2ds, item, masked_values - ) - else: - d = {ix: i for ix, i in enumerate(value.array)} - self.add_transient_array(d, item, masked_values) - - elif value.data_type == DataType.transient3d: - if value.array is not None: - self.add_transient_array( - value.transient_3ds, item, masked_values - ) - - elif value.data_type == DataType.transientlist: - self.add_transient_list(value, masked_values) - - else: - pass - - def add_model(self, model, selpaklist=None, masked_values=None): - """ - Method to add all array and transient list data from a modflow model - to a timeseries of vtk files - - Parameters - ---------- - model : fp.modflow.ModelInterface - any flopy model object - selpaklist : list, None - optional parameter where the user can supply a list of packages - to export. - masked_values : list, None - list of values to set equal to nan - - """ - for package in model.packagelist: - if selpaklist is not None: - if package.name[0] not in selpaklist: - continue - - self.add_package(package, masked_values) - - def add_pathline_points(self, pathlines, timeseries=False): - """ - Method to add Modpath output from a pathline or timeseries file - to the grid. Colors will be representative of totim. - - Parameters: - ---------- - pathlines : np.recarray or list - pathlines accepts a numpy recarray of a particle pathline or - a list of numpy reccarrays associated with pathlines - - timeseries : bool - method to plot data as a series of vtk timeseries files for - animation or as a single static vtk file. Default is false - - """ - if isinstance(pathlines, (np.recarray, np.ndarray)): - pathlines = [pathlines] - - keys = ["particleid", "time"] - if not timeseries: - arrays = {key: [] for key in keys} - points = [] - for recarray in pathlines: - recarray["z"] *= self.vertical_exageration - for rec in recarray: - points.append(tuple(rec[["x", "y", "z"]])) - for key in keys: - arrays[key].append(rec[key]) - - self._set_modpath_point_data(points, arrays) - - else: - self.vtk_pathlines = self.__vtk.vtkUnstructuredGrid() - timeseries_data = {} - points = {} - for recarray in pathlines: - recarray["z"] *= self.vertical_exageration - for rec in recarray: - time = rec["time"] - if time not in points: - points[time] = [tuple(rec[["x", "y", "z"]])] - t = {key: [] for key in keys} - timeseries_data[time] = t - - else: - points[time].append(tuple(rec[["x", "y", "z"]])) - - for key in keys: - timeseries_data[time][key].append(rec[key]) - - self.__pathline_transient_data = timeseries_data - self._pathline_points = points - - def add_heads(self, hds, kstpkper=None, masked_values=None): - """ - Method to add head data to a vtk file - - Parameters - ---------- - hds : flopy.utils.LayerFile object - Binary or Formatted HeadFile type object - kstpkper : tuple, list of tuples, None - tuple or list of tuples of kstpkper, if kstpkper=None all - records are selected - masked_values : list, None - list of values to set equal to nan - - """ - if not self.__transient_output_data and self.__transient_data: - raise AssertionError( - "Head data cannot be mixed with transient package data, " - "Please create a seperate vtk object for transient head data" - ) - - if kstpkper is None: - kstpkper = hds.get_kstpkper() - elif isinstance(kstpkper, (list, tuple)): - if not isinstance(kstpkper[0], (list, tuple)): - kstpkper = [kstpkper] - else: - pass - - # reset totim based on values read from head file - times = hds.get_times() - kstpkpers = hds.get_kstpkper() - self._totim = 
{ki: time for (ki, time) in zip(kstpkpers, times)} - - text = hds.text.decode() - - d = dict() - for ki in kstpkper: - d[ki] = hds.get_data(ki) - - self.__transient_output_data = False - self.add_transient_array(d, name=text, masked_values=masked_values) - self.__transient_output_data = True - - def add_cell_budget( - self, cbc, text=None, kstpkper=None, masked_values=None - ): - """ - Method to add cell budget data to vtk - - Parameters - ---------- - cbc : flopy.utils.CellBudget object - flopy binary CellBudget object - text : str or None - The text identifier for the record. Examples include - 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. If text=None - all compatible records are exported - kstpkper : tuple, list of tuples, None - tuple or list of tuples of kstpkper, if kstpkper=None all - records are selected - masked_values : list, None - list of values to set equal to nan - - """ - if not self.__transient_output_data and self.__transient_data: - raise AssertionError( - "Binary data cannot be mixed with transient package data, " - "Please create a seperate vtk object for transient head data" - ) - - records = cbc.get_unique_record_names(decode=True) - imeth_dict = { - record: imeth for (record, imeth) in zip(records, cbc.imethlist) - } - if text is None: - keylist = records - else: - if not isinstance(text, list): - keylist = [text] - else: - keylist = text - - if kstpkper is None: - kstpkper = cbc.get_kstpkper() - elif isinstance(kstpkper, tuple): - if not isinstance(kstpkper[0], (list, tuple)): - kstpkper = [kstpkper] - else: - pass - - # reset totim based on values read from budget file - times = cbc.get_times() - kstpkpers = cbc.get_kstpkper() - self._totim = {ki: time for (ki, time) in zip(kstpkpers, times)} - - for name in keylist: - d = {} - for i, ki in enumerate(kstpkper): - try: - array = cbc.get_data(kstpkper=ki, text=name, full3D=True) - if len(array) == 0: - continue - - array = np.ma.filled(array, np.nan) - if array.size < self.nnodes: - if array.size < self.ncpl: - raise AssertionError( - "Array size must be equal to " - "either ncpl or nnodes" - ) - - array = np.zeros(self.nnodes) * np.nan - array[: array.size] = np.ravel(array) - - except ValueError: - if imeth_dict[name] == 6: - array = np.full((self.nnodes,), np.nan) - rec = cbc.get_data(kstpkper=ki, text=name)[0] - for [node, q] in zip(rec["node"], rec["q"]): - array[node] = q - else: - continue - - d[ki] = array - - self.__transient_output_data = False - self.add_transient_array(d, name, masked_values) - self.__transient_output_data = True - - def _set_modpath_point_data(self, points, d): - """ - Method to build the vtk point geometry and set arrays for - modpath pathlines - - Parameters - ---------- - points : list - list of (x, y, z) points - d : dict - dictionary of numpy arrays to add to vtk - - """ - from vtk.util import numpy_support - - nverts = len(points) - - self.vtk_pathlines = self.__vtk.vtkUnstructuredGrid() - - vtk_points = self.__vtk.vtkPoints() - for point in points: - vtk_points.InsertNextPoint(point) - - self.vtk_pathlines.SetPoints(vtk_points) - - # create a Vertex instance for each point data add to grid - for i in range(nverts): - vertex = self.__vtk.vtkPolyVertex() - vertex.GetPointIds().SetNumberOfIds(1) - vertex.GetPointIds().SetId(0, i) - - # set data to the pathline grid - self.vtk_pathlines.InsertNextCell( - vertex.GetCellType(), vertex.GetPointIds() - ) - - # process arrays and add arrays to grid. 
- for name, array in d.items(): - array = np.array(array) - vtk_array = numpy_support.numpy_to_vtk( - num_array=array, array_type=self.__vtk.VTK_FLOAT - ) - vtk_array.SetName(name) - self.vtk_pathlines.GetCellData().AddArray(vtk_array) - - def write(self, f, kper=None): - """ - Method to write a vtk file from the VTK object - - Parameters - ---------- - f : str - vtk file name - kpers : int, list, tuple - stress period or list of stress periods to write to vtk. This - parameter only applies to transient package data. - - """ - grids = [self.vtk_grid, self.vtk_polygons, self.vtk_pathlines] - suffix = ["", "_hfb", "_pathline"] - - extension = ".vtk" - if self.pvd: - self.pvd = Pvd() - extension = ".vtu" - - fpth, _ = os.path.split(f) - if not os.path.exists(fpth): - os.mkdir(fpth) - - if kper is not None: - if isinstance(kper, (int, float)): - kper = [int(kper)] - - for ix, grid in enumerate(grids): - if grid is None: - continue - - if not f.endswith(".vtk") and not f.endswith(".vtu"): - foo = f"{f}{suffix[ix]}{extension}" - else: - foo = f"{f[:-4]}{suffix[ix]}{f[-4:]}" - - if not self.xml: - w = self.__vtk.vtkUnstructuredGridWriter() - if self.binary: - w.SetFileTypeToBinary() - else: - w = self.__vtk.vtkXMLUnstructuredGridWriter() - if not self.binary: - w.SetDataModeToAscii() - - if self.__pathline_transient_data and ix == 2: - stp = 0 - for time, d in self.__pathline_transient_data.items(): - tf = self.__create_transient_vtk_path(foo, stp) - points = self._pathline_points[time] - self._set_modpath_point_data(points, d) - - w.SetInputData(self.vtk_pathlines) - w.SetFileName(tf) - w.Update() - stp += 1 - - else: - w.SetInputData(grid) - - if ( - self.__transient_data or self.__transient_vector - ) and ix == 0: - if self.__transient_data: - cnt = 0 - for per, d in self.__transient_data.items(): - if kper is not None: - if per not in kper: - continue - - if self.__transient_output_data: - tf = self.__create_transient_vtk_path(foo, cnt) - else: - tf = self.__create_transient_vtk_path(foo, per) - self._add_timevalue(per, tf) - for name, array in d.items(): - self.add_array(array, name) - - if per in self.__transient_vector: - d = self.__transient_vector[d] - for name, vector in d.items(): - self.add_vector(vector, name) - - w.SetFileName(tf) - w.Update() - cnt += 1 - else: - cnt = 0 - for per, d in self.__transient_vector.items(): - if kper is not None: - if per not in kper: - continue - - if self.__transient_output_data: - tf = self.__create_transient_vtk_path(foo, cnt) - else: - tf = self.__create_transient_vtk_path(foo, per) - self._add_timevalue(per) - for name, vector in d.items(): - self.add_vector(vector, name) - - w.SetFileName(tf) - w.update() - cnt += 1 - else: - w.SetFileName(foo) - w.Update() - - if not type(self.pvd) == bool: - if not f.endswith(".vtu") or f.endswith(".vtk"): - pvdfile = f"{f}.pvd" - else: - pvdfile = f"{f[:-4]}.pvd" - - self.pvd.write(pvdfile) - - def __create_transient_vtk_path(self, path, kper): - """ - Method to set naming convention for transient vtk file series - - Parameters - ---------- - path : str - vtk file path - kper : int - zero based stress period number - - Returns - ------- - updated vtk file path of format _{:06d}.vtk where - {:06d} represents the six zero padded stress period time - """ - pth = ".".join(path.split(".")[:-1]) - if pth.endswith("_"): - pth = pth[:-1] - extension = path.split(".")[-1] - return f"{pth}_{kper :06d}.{extension}" - - -def export_model( - model, - otfolder, - package_names=None, - nanval=-1e20, - smooth=False, - 
point_scalars=False, - vtk_grid_type="auto", - true2d=False, - binary=True, - kpers=None, -): - """ - DEPRECATED method to export model to vtk - - Parameters - ---------- - - model : flopy model instance - flopy model - otfolder : str - output folder - package_names : list - list of package names to be exported - nanval : scalar - no data value, default value is -1e20 - array2d : bool - True if array is 2d, default is False - smooth : bool - if True, will create smooth layer elevations, default is False - point_scalars : bool - if True, will also output array values at cell vertices, default is - False; note this automatically sets smooth to True - vtk_grid_type : str - Specific vtk_grid_type or 'auto' (default). Possible specific values - are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'. - If 'auto', the grid type is automatically determined. Namely: - * A regular grid (in all three directions) will be saved as an - 'ImageData'. - * A rectilinear (in all three directions), non-regular grid - will be saved as a 'RectilinearGrid'. - * Other grids will be saved as 'UnstructuredGrid'. - true2d : bool - If True, the model is expected to be 2d (1 layer, 1 row or 1 column) - and the data will be exported as true 2d data, default is False. - binary : bool - if True the output file will be binary, default is False - kpers : iterable of int - Stress periods to export. If None (default), all stress periods will be - exported. - """ - warnings.warn("export_model is deprecated, please use Vtk.add_model()") - - if nanval != -1e20: - warnings.warn("nanval is deprecated, setting to np.nan") - if true2d: - warnings.warn("true2d is no longer supported, setting to False") - if vtk_grid_type != "auto": - warnings.warn("vtk_grid_type is deprecated, setting to binary") - - vtk = Vtk( - model, - vertical_exageration=1, - binary=binary, - smooth=smooth, - point_scalars=point_scalars, - ) - - vtk.add_model(model, package_names) - - if not os.path.exists(otfolder): - os.mkdir(otfolder) - - name = model.name - vtk.write(os.path.join(otfolder, name), kper=kpers) - - -def export_package( - pak_model, - pak_name, - otfolder, - vtkobj=None, - nanval=-1e20, - smooth=False, - point_scalars=False, - vtk_grid_type="auto", - true2d=False, - binary=True, - kpers=None, -): - """ - DEPRECATED method to export package to vtk - - Parameters - ---------- - - pak_model : flopy model instance - the model of the package - pak_name : str - the name of the package - otfolder : str - output folder to write the data - vtkobj : VTK instance - a vtk object (allows export_package to be called from - export_model) - nanval : scalar - no data value, default value is -1e20 - smooth : bool - if True, will create smooth layer elevations, default is False - point_scalars : bool - if True, will also output array values at cell vertices, default is - False; note this automatically sets smooth to True - vtk_grid_type : str - Specific vtk_grid_type or 'auto' (default). Possible specific values - are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'. - If 'auto', the grid type is automatically determined. Namely: - * A regular grid (in all three directions) will be saved as an - 'ImageData'. - * A rectilinear (in all three directions), non-regular grid - will be saved as a 'RectilinearGrid'. - * Other grids will be saved as 'UnstructuredGrid'. - true2d : bool - If True, the model is expected to be 2d (1 layer, 1 row or 1 column) - and the data will be exported as true 2d data, default is False. 
- binary : bool - if True the output file will be binary, default is False - kpers : iterable of int - Stress periods to export. If None (default), all stress periods will be - exported. - """ - warnings.warn("export_package is deprecated, use Vtk.add_package()") - - if nanval != -1e20: - warnings.warn("nanval is deprecated, setting to np.nan") - if true2d: - warnings.warn("true2d is no longer supported, setting to False") - if vtk_grid_type != "auto": - warnings.warn("vtk_grid_type is deprecated, setting to binary") - - if not vtkobj: - vtk = Vtk( - pak_model, - binary=binary, - smooth=smooth, - point_scalars=point_scalars, - ) - else: - vtk = vtkobj - - if not os.path.exists(otfolder): - os.mkdir(otfolder) - - if isinstance(pak_name, list): - pak_name = pak_name[0] - - p = pak_model.get_package(pak_name) - vtk.add_package(p) - - vtk.write(os.path.join(otfolder, pak_name), kper=kpers) - - -def export_transient( - model, - array, - output_folder, - name, - nanval=-1e20, - array2d=False, - smooth=False, - point_scalars=False, - vtk_grid_type="auto", - true2d=False, - binary=True, - kpers=None, -): - """ - DEPRECATED method to export transient arrays and lists to vtk - - Parameters - ---------- - - model : MFModel - the flopy model instance - array : Transient instance - flopy transient array - output_folder : str - output folder to write the data - name : str - name of array - nanval : scalar - no data value, default value is -1e20 - array2d : bool - True if array is 2d, default is False - smooth : bool - if True, will create smooth layer elevations, default is False - point_scalars : bool - if True, will also output array values at cell vertices, default is - False; note this automatically sets smooth to True - vtk_grid_type : str - Specific vtk_grid_type or 'auto' (default). Possible specific values - are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'. - If 'auto', the grid type is automatically determined. Namely: - * A regular grid (in all three directions) will be saved as an - 'ImageData'. - * A rectilinear (in all three directions), non-regular grid - will be saved as a 'RectilinearGrid'. - * Other grids will be saved as 'UnstructuredGrid'. - true2d : bool - If True, the model is expected to be 2d (1 layer, 1 row or 1 column) - and the data will be exported as true 2d data, default is False. - binary : bool - if True the output file will be binary, default is False - kpers : iterable of int - Stress periods to export. If None (default), all stress periods will be - exported. 
- """ - warnings.warn( - "export_transient is deprecated, use Vtk.add_transient_array() or " - "Vtk.add_transient_list()" - ) - - if nanval != -1e20: - warnings.warn("nanval is deprecated, setting to np.nan") - if true2d: - warnings.warn("true2d is no longer supported, setting to False") - if vtk_grid_type != "auto": - warnings.warn("vtk_grid_type is deprecated, setting to binary") - if array2d: - warnings.warn( - "array2d parameter is deprecated, 2d arrays are " - "handled automatically" - ) - - if not os.path.exists(output_folder): - os.mkdir(output_folder) - - vtk = Vtk(model, binary=binary, smooth=smooth, point_scalars=point_scalars) - - if array.data_type == DataType.transient2d: - if array.array is not None: - if hasattr(array, "transient_2ds"): - vtk.add_transient_array(array.transient_2ds, name) - else: - d = {ix: i for ix, i in enumerate(array.array)} - vtk.add_transient_array(d, name) - - elif array.data_type == DataType.transient3d: - if array.array is None: - vtk.add_transient_array(array.transient_3ds, name) - - elif array.data_type == DataType.transientlist: - vtk.add_transient_list(array) - - else: - raise TypeError(f"type {type(array)} not valid for export_transient") - - vtk.write(os.path.join(output_folder, name), kper=kpers) - - -def export_array( - model, - array, - output_folder, - name, - nanval=-1e20, - array2d=False, - smooth=False, - point_scalars=False, - vtk_grid_type="auto", - true2d=False, - binary=True, -): - """ - DEPRECATED method to export array to vtk - - Parameters - ---------- - - model : flopy model instance - the flopy model instance - array : flopy array - flopy 2d or 3d array - output_folder : str - output folder to write the data - name : str - name of array - nanval : scalar - no data value, default value is -1e20 - array2d : bool - true if the array is 2d and represents the first layer, default is - False - smooth : bool - if True, will create smooth layer elevations, default is False - point_scalars : bool - if True, will also output array values at cell vertices, default is - False; note this automatically sets smooth to True - vtk_grid_type : str - Specific vtk_grid_type or 'auto' (default). Possible specific values - are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'. - If 'auto', the grid type is automatically determined. Namely: - * A regular grid (in all three directions) will be saved as an - 'ImageData'. - * A rectilinear (in all three directions), non-regular grid - will be saved as a 'RectilinearGrid'. - * Other grids will be saved as 'UnstructuredGrid'. - true2d : bool - If True, the model is expected to be 2d (1 layer, 1 row or 1 column) - and the data will be exported as true 2d data, default is False. 
- binary : bool - if True the output file will be binary, default is False - """ - warnings.warn("export_array is deprecated, please use Vtk.add_array()") - - if nanval != -1e20: - warnings.warn("nanval is deprecated, setting to np.nan") - if true2d: - warnings.warn("true2d is no longer supported, setting to False") - if vtk_grid_type != "auto": - warnings.warn("vtk_grid_type is deprecated, setting to binary") - if array2d: - warnings.warn( - "array2d parameter is deprecated, 2d arrays are " - "handled automatically" - ) - - if not os.path.exists(output_folder): - os.mkdir(output_folder) - - if array.size < model.modelgrid.nnodes: - if array.size < model.modelgrid.ncpl: - raise AssertionError( - "Array size must be equal to either ncpl or nnodes" - ) - - array = np.zeros(model.modelgrid.nnodes) * np.nan - array[: array.size] = np.ravel(array) - - vtk = Vtk(model, binary=binary, smooth=smooth, point_scalars=point_scalars) - - vtk.add_array(array, name) - vtk.write(os.path.join(output_folder, name)) - - -def export_heads( - model, - hdsfile, - otfolder, - text="head", - precision="auto", - verbose=False, - nanval=-1e20, - kstpkper=None, - smooth=False, - point_scalars=False, - vtk_grid_type="auto", - true2d=False, - binary=True, -): - """ - DEPRECATED method - - Exports binary head file to vtk - - Parameters - ---------- - - model : MFModel - the flopy model instance - hdsfile : str, HeadFile object - binary head file path or object - otfolder : str - output folder to write the data - text : string - Name of the text string in the head file. Default is 'head'. - precision : str - Precision of data in the head file: 'auto', 'single' or 'double'. - Default is 'auto'. - verbose : bool - If True, write information to the screen. Default is False. - nanval : scalar - no data value, default value is -1e20 - kstpkper : tuple of ints or list of tuple of ints - A tuple containing the time step and stress period (kstp, kper). - The kstp and kper values are zero based. - smooth : bool - if True, will create smooth layer elevations, default is False - point_scalars : bool - if True, will also output array values at cell vertices, default is - False; note this automatically sets smooth to True - vtk_grid_type : str - Specific vtk_grid_type or 'auto' (default). Possible specific values - are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'. - If 'auto', the grid type is automatically determined. Namely: - * A regular grid (in all three directions) will be saved as an - 'ImageData'. - * A rectilinear (in all three directions), non-regular grid - will be saved as a 'RectilinearGrid'. - * Other grids will be saved as 'UnstructuredGrid'. - true2d : bool - If True, the model is expected to be 2d (1 layer, 1 row or 1 column) - and the data will be exported as true 2d data, default is False. 
- binary : bool - if True the output file will be binary, default is False - """ - warnings.warn("export_heads is deprecated, use Vtk.add_heads()") - - if nanval != -1e20: - warnings.warn("nanval is deprecated, setting to np.nan") - if true2d: - warnings.warn("true2d is no longer supported, setting to False") - if vtk_grid_type != "auto": - warnings.warn("vtk_grid_type is deprecated, setting to binary") - - from flopy.utils import HeadFile - - if not os.path.exists(otfolder): - os.mkdir(otfolder) - - if not isinstance(hdsfile, HeadFile): - hds = HeadFile( - hdsfile, text=text, precision=precision, verbose=verbose - ) - else: - hds = hdsfile - - vtk = Vtk(model, binary=binary, smooth=smooth, point_scalars=point_scalars) - - vtk.add_heads(hds, kstpkper) - name = f"{model.name}_{text}" - vtk.write(os.path.join(otfolder, name)) - - -def export_cbc( - model, - cbcfile, - otfolder, - precision="single", - verbose=False, - nanval=-1e20, - kstpkper=None, - text=None, - smooth=False, - point_scalars=False, - vtk_grid_type="auto", - true2d=False, - binary=True, -): - """ - DEPRECATED method - - Exports cell by cell file to vtk - - Parameters - ---------- - - model : flopy model instance - the flopy model instance - cbcfile : str - the cell by cell file - otfolder : str - output folder to write the data - precision : str - Precision of data in the cell by cell file: 'single' or 'double'. - Default is 'single'. - verbose : bool - If True, write information to the screen. Default is False. - nanval : scalar - no data value - kstpkper : tuple of ints or list of tuple of ints - A tuple containing the time step and stress period (kstp, kper). - The kstp and kper values are zero based. - text : str or list of str - The text identifier for the record. Examples include - 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. - smooth : bool - if True, will create smooth layer elevations, default is False - point_scalars : bool - if True, will also output array values at cell vertices, default is - False; note this automatically sets smooth to True - vtk_grid_type : str - Specific vtk_grid_type or 'auto' (default). Possible specific values - are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'. - If 'auto', the grid type is automatically determined. Namely: - * A regular grid (in all three directions) will be saved as an - 'ImageData'. - * A rectilinear (in all three directions), non-regular grid - will be saved as a 'RectilinearGrid'. - * Other grids will be saved as 'UnstructuredGrid'. - true2d : bool - If True, the model is expected to be 2d (1 layer, 1 row or 1 column) - and the data will be exported as true 2d data, default is False. 
- binary : bool - if True the output file will be binary, default is False - """ - warnings.warn("export_cbc is deprecated, use Vtk.add_cell_budget()") - - if nanval != -1e20: - warnings.warn("nanval is deprecated, setting to np.nan") - if true2d: - warnings.warn("true2d is no longer supported, setting to False") - if vtk_grid_type != "auto": - warnings.warn("vtk_grid_type is deprecated, setting to binary") - - from flopy.utils import CellBudgetFile - - if not os.path.exists(otfolder): - os.mkdir(otfolder) - - if not isinstance(cbcfile, CellBudgetFile): - cbc = CellBudgetFile(cbcfile, precision=precision, verbose=verbose) - else: - cbc = cbcfile - - vtk = Vtk(model, binary=binary, smooth=smooth, point_scalars=point_scalars) - - vtk.add_cell_budget(cbc, text, kstpkper) - fname = f"{model.name}_CBC" - vtk.write(os.path.join(otfolder, fname)) +""" +The vtk module provides functionality for exporting model inputs and +outputs to VTK. +""" + +import os +import warnings +import numpy as np +from ..datbase import DataType, DataInterface +from ..utils import Util3d + +from ..utils import import_optional_dependency + +warnings.simplefilter("always", DeprecationWarning) + + +VTKIGNORE = ("vkcb", "perlen", "steady", "tsmult", "nstp") + + +class Pvd: + """ + Simple class to build a Paraview Data File (PVD) + + """ + + def __init__(self): + self.__data = [ + '\n', + '\n', + "\n", + ] + + def add_timevalue(self, file, timevalue): + """ + Method to add a Dataset record to the pvd file + + Parameters + ---------- + file : str + vtu file name + timevalue : float + time step value in model time + """ + if not file.endswith(".vtu"): + raise AssertionError("File name must end with .vtu ") + + file = os.path.split(file)[-1] + + record = ( + f'\n' + ) + self.__data.append(record) + + def write(self, f): + """ + Method to write a pvd file from the PVD object + + Paramaters + ---------- + f : str + PVD file name + + """ + if not f.endswith(".pvd"): + f += ".pvd" + + with open(f, "w") as foo: + foo.writelines(self.__data) + foo.write("\n") + foo.write("") + + +class Vtk: + """ + Class that builds VTK objects and exports models to VTK files + + + Parameters: + ---------- + model : flopy.ModelInterface object + any flopy model object, example flopy.modflow.Modflow() object + modelgrid : flopy.discretization.Grid object + any flopy modelgrid object, example. VertexGrid + vertical_exageration : float + floating point value to scale vertical exageration of the vtk points + default is 1. + binary : bool + flag that indicates if Vtk will write a binary or text file. Binary + is prefered as paraview has a bug (8/4/2021) where is cannot represent + NaN values from ASCII (non xml) files. In this case no-data values + are set to 1e+30. + xml : bool + flag to write xml based VTK files + pvd : bool + boolean flag to write a paraview pvd file for transient data. This + file maps vtk files to a model time. + shared_points : bool + boolean flag to share points in grid polyhedron construction. Default + is False, as paraview has a bug (8/4/2021) where some polyhedrons will + not properly close when using shared points. If shared_points is True + file size will be slightly smaller. + smooth : bool + boolean flag to interpolate vertex elevations based on shared cell + elevations. Default is False. + point_scalars : bool + boolen flag to write interpolated data at each point based "shared + vertices". 
+ + """ + + def __init__( + self, + model=None, + modelgrid=None, + vertical_exageration=1, + binary=True, + xml=False, + pvd=False, + shared_points=False, + smooth=False, + point_scalars=False, + ): + + vtk = import_optional_dependency("vtk") + + if model is None and modelgrid is None: + raise AssertionError( + "A model or modelgrid must be provided to use Vtk" + ) + + elif model is not None: + self.modelgrid = model.modelgrid + self.modeltime = model.modeltime + else: + self.modelgrid = modelgrid + self.modeltime = None + + self.binary = binary + self.xml = xml + self.pvd = pvd + + if self.pvd and not self.xml: + print( + "Switching to xml, ASCII and standard binary are not " + "supported by Paraview's PVD reader" + ) + self.xml = True + + self.vertical_exageration = vertical_exageration + self.shared_points = shared_points + self.smooth = smooth + self.point_scalars = point_scalars + + self.verts = self.modelgrid.verts + self.iverts = self.modelgrid.iverts + self.nnodes = self.modelgrid.nnodes + + nvpl = 0 + for iv in self.iverts: + nvpl += len(iv) - 1 + + self.nvpl = nvpl + + # method to accomodate DISU grids, do not use modelgrid.ncpl! + self.ncpl = len(self.iverts) + if self.nnodes == len(self.iverts): + self.nlay = 1 + else: + self.nlay = self.modelgrid.nlay + + self._laycbd = self.modelgrid.laycbd + if self._laycbd is None: + self._laycbd = np.zeros((self.nlay,), dtype=int) + + self._active = [] + self._ncbd = 0 + for i in range(self.nlay): + self._active.append(1) + if self._laycbd[i] != 0: + self._active.append(0) + self._ncbd += 1 + + # USG trap + if self.modelgrid.top.size == self.nnodes: + self.top = self.modelgrid.top.reshape(self.nnodes) + else: + self.top = self.modelgrid.top.reshape(self.ncpl) + self.botm = self.modelgrid.botm.reshape(-1, self.ncpl) + + if self.modelgrid.idomain is not None: + self.idomain = self.modelgrid.idomain.reshape(self.nnodes) + else: + self.idomain = np.ones((self.nnodes,), dtype=int) + + if self.modeltime is not None: + perlen = self.modeltime.perlen + self._totim = np.add.accumulate(perlen) + + self.points = [] + self.faces = [] + self.vtk_grid = None + self.vtk_polygons = None + self.vtk_pathlines = None + self._pathline_points = [] + self._point_scalar_graph = None + self._point_scalar_numpy_graph = None + self._idw_weight_graph = None + self._idw_total_weight_graph = None + + self._vtk_geometry_set = False + self.__transient_output_data = False + self.__transient_data = {} + self.__transient_vector = {} + self.__pathline_transient_data = {} + self.__vtk = vtk + + if self.point_scalars: + self._create_point_scalar_graph() + + def _create_smoothed_elevation_graph(self, adjk, top_elev=True): + """ + Method to create a dictionary of shared point elevations + + Parameters + ---------- + adjk : int + confining bed adjusted layer + + Returns + ------- + dict : {vertex number: elevation} + + """ + elevations = {} + for i, iv in enumerate(self.iverts): + iv = iv[1:] + for v in iv: + if v is None: + continue + if not top_elev: + zv = self.botm[adjk][i] * self.vertical_exageration + elif adjk == 0: + zv = self.top[i] * self.vertical_exageration + else: + if self.top.size == self.nnodes: + adji = (adjk * self.ncpl) + i + zv = self.top[adji] * self.vertical_exageration + else: + zv = self.botm[adjk - 1][i] * self.vertical_exageration + + if v in elevations: + elevations[v].append(zv) + else: + elevations[v] = [zv] + + for key in elevations: + elevations[key] = np.mean(elevations[key]) + + return elevations + + def _create_point_scalar_graph(self): + """ + 
Method to create a point scalar graph to map cells to points + + """ + graph = {} + v0 = 0 + v1 = 0 + nvert = len(self.verts) + shared_points = self.shared_points + if len(self._active) != self.nlay: + shared_points = False + + for k in range(self.nlay): + for i, iv in enumerate(self.iverts): + iv = iv[1:] + adji = (k * self.ncpl) + i + for v in iv: + if v is None: + continue + xvert = self.verts[v, 0] + yvert = self.verts[v, 1] + adjv = v + v1 + if adjv not in graph: + graph[adjv] = { + "vtk_points": [v0], + "idx": [adji], + "xv": [xvert], + "yv": [yvert], + } + else: + graph[adjv]["vtk_points"].append(v0) + graph[adjv]["idx"].append(adji) + graph[adjv]["xv"].append(xvert) + graph[adjv]["yv"].append(yvert) + v0 += 1 + + v1 += nvert + + if k == self.nlay - 1 or not shared_points: + for i, iv in enumerate(self.iverts): + iv = iv[1:] + adji = (k * self.ncpl) + i + for v in iv: + if v is None: + continue + xvert = self.verts[v, 0] + yvert = self.verts[v, 1] + adjv = v + v1 + if adjv not in graph: + graph[adjv] = { + "vtk_points": [v0], + "idx": [adji], + "xv": [xvert], + "yv": [yvert], + } + else: + graph[adjv]["vtk_points"].append(v0) + graph[adjv]["idx"].append(adji) + graph[adjv]["xv"].append(xvert) + graph[adjv]["yv"].append(yvert) + v0 += 1 + v1 += nvert + + # convert this to a numpy representation + max_shared = 0 + num_points = v0 + for k, d in graph.items(): + if len(d["vtk_points"]) > max_shared: + max_shared = len(d["vtk_points"]) + + numpy_graph = np.ones((max_shared, num_points), dtype=int) * -1 + xvert = np.ones((max_shared, num_points)) * np.nan + yvert = np.ones((max_shared, num_points)) * np.nan + for k, d in graph.items(): + for _, pt in enumerate(d["vtk_points"]): + for ixx, value in enumerate(d["idx"]): + numpy_graph[ixx, pt] = value + xvert[ixx, pt] = d["xv"][ixx] + yvert[ixx, pt] = d["yv"][ixx] + + # now create the IDW weights for point scalars + xc = np.ravel(self.modelgrid.xcellcenters) + yc = np.ravel(self.modelgrid.ycellcenters) + + if xc.size != self.nnodes: + xc = np.tile(xc, self.nlay) + yc = np.tile(yc, self.nlay) + + xc = np.where(numpy_graph != -1, xc[numpy_graph], np.nan) + yc = np.where(numpy_graph != -1, yc[numpy_graph], np.nan) + + asq = (xvert - xc) ** 2 + bsq = (yvert - yc) ** 2 + + weights = np.sqrt(asq + bsq) + tot_weights = np.nansum(weights, axis=0) + + self._point_scalar_graph = graph + self._point_scalar_numpy_graph = numpy_graph + self._idw_weight_graph = weights + self._idw_total_weight_graph = tot_weights + + def _build_grid_geometry(self): + """ + Method that creates lists of vertex points and cell faces + + """ + points = [] + faces = [] + v0 = 0 + v1 = 0 + ncb = 0 + shared_points = self.shared_points + if len(self._active) != self.nlay: + shared_points = False + + for k in range(self.nlay): + adjk = k + ncb + if k != self.nlay - 1: + if self._active[adjk + 1] == 0: + ncb += 1 + + if self.smooth: + elevations = self._create_smoothed_elevation_graph(adjk) + + for i, iv in enumerate(self.iverts): + iv = iv[1:] + for v in iv: + if v is None: + continue + xv = self.verts[v, 0] + yv = self.verts[v, 1] + if self.smooth: + zv = elevations[v] + elif k == 0: + zv = self.top[i] * self.vertical_exageration + else: + if self.top.size == self.nnodes: + adji = (adjk * self.ncpl) + i + zv = self.top[adji] * self.vertical_exageration + else: + zv = ( + self.botm[adjk - 1][i] + * self.vertical_exageration + ) + + points.append([xv, yv, zv]) + v1 += 1 + + cell_faces = [ + [v for v in range(v0, v1)], + [v + self.nvpl for v in range(v0, v1)], + ] + + for v in 
range(v0, v1): + if v != v1 - 1: + cell_faces.append( + [v + 1, v, v + self.nvpl, v + self.nvpl + 1] + ) + else: + cell_faces.append( + [v0, v, v + self.nvpl, v0 + self.nvpl] + ) + + v0 = v1 + faces.append(cell_faces) + + if k == self.nlay - 1 or not shared_points: + if self.smooth: + elevations = self._create_smoothed_elevation_graph( + adjk, top_elev=False + ) + + for i, iv in enumerate(self.iverts): + iv = iv[1:] + for v in iv: + if v is None: + continue + xv = self.verts[v, 0] + yv = self.verts[v, 1] + if self.smooth: + zv = elevations[v] + else: + zv = self.botm[adjk][i] * self.vertical_exageration + + points.append([xv, yv, zv]) + v1 += 1 + + v0 = v1 + + self.points = points + self.faces = faces + + def _set_vtk_grid_geometry(self): + """ + Method to set vtk's geometry and add it to the vtk grid object + + """ + if self._vtk_geometry_set: + return + + if not self.faces: + self._build_grid_geometry() + + self.vtk_grid = self.__vtk.vtkUnstructuredGrid() + + points = self.__vtk.vtkPoints() + for point in self.points: + points.InsertNextPoint(point) + + self.vtk_grid.SetPoints(points) + + for node in range(self.nnodes): + cell_faces = self.faces[node] + nface = len(cell_faces) + fid_list = self.__vtk.vtkIdList() + fid_list.InsertNextId(nface) + for face in cell_faces: + fid_list.InsertNextId(len(face)) + [fid_list.InsertNextId(i) for i in face] + + self.vtk_grid.InsertNextCell(self.__vtk.VTK_POLYHEDRON, fid_list) + + self._vtk_geometry_set = True + + def _build_hfbs(self, pkg): + """ + Method to add hfb planes to the vtk object + + Parameters + ---------- + pkg : object + flopy hfb object + + """ + from vtk.util import numpy_support + + # check if modflow 6 or modflow 2005 + if hasattr(pkg, "hfb_data"): + mf6 = False + hfb_data = pkg.hfb_data + else: + # asssume that there is no transient hfb data for now + hfb_data = pkg.stress_period_data.array[0] + mf6 = True + + points = [] + faces = [] + array = [] + cnt = 0 + verts = self.modelgrid.cross_section_vertices + verts = np.dstack((verts[0], verts[1])) + botm = np.ravel(self.botm) + for hfb in hfb_data: + if self.modelgrid.grid_type == "structured": + if not mf6: + k = hfb["k"] + cell1 = list(hfb[["k", "irow1", "icol1"]]) + cell2 = list(hfb[["k", "irow2", "icol2"]]) + else: + cell1 = hfb["cellid1"] + cell2 = hfb["cellid2"] + k = cell1[0] + n0, n1 = self.modelgrid.get_node([cell1, cell2]) + + else: + cell1 = hfb["cellid1"] + cell2 = hfb["cellid2"] + if len(cell1) == 2: + k, n0 = cell1 + _, n1 = cell2 + else: + n0 = cell1[0] + n1 = cell1[1] + k = 0 + + array.append(hfb["hydchr"]) + adjn0 = n0 - (k * self.ncpl) + adjn1 = n1 - (k * self.ncpl) + v1 = verts[adjn0] + v2 = verts[adjn1] + + # get top and botm elevations, use max and min: + if k == 0 or self.top.size == self.nnodes: + tv = np.max([self.top[n0], self.top[n1]]) + bv = np.min([botm[n0], botm[n1]]) + else: + tv = np.max([botm[n0 - self.ncpl], botm[n1 - self.ncpl]]) + bv = np.min([botm[n0], botm[n1]]) + + tv *= self.vertical_exageration + bv *= self.vertical_exageration + + pts = [] + for v in v1: + # ix = np.where(v2 == v) + ix = np.where((v2.T[0] == v[0]) & (v2.T[1] == v[1])) + if len(ix[0]) > 0 and len(pts) < 2: + pts.append(v2[ix[0][0]]) + + pts = np.sort(pts)[::-1] + + # create plane, counter-clockwise order + pts = [ + (pts[0, 0], pts[0, 1], tv), + (pts[1, 0], pts[1, 1], tv), + (pts[1, 0], pts[1, 1], bv), + (pts[0, 0], pts[0, 1], bv), + ] + + # add to points and faces + plane_face = [] + for pt in pts: + points.append(pt) + plane_face.append(cnt) + cnt += 1 + + 
faces.append(plane_face) + + # now create the vtk geometry + vtk_points = self.__vtk.vtkPoints() + for point in points: + vtk_points.InsertNextPoint(point) + + # now create an UnstructuredGrid object + polydata = self.__vtk.vtkUnstructuredGrid() + polydata.SetPoints(vtk_points) + + # create the vtk polygons + for face in faces: + polygon = self.__vtk.vtkPolygon() + polygon.GetPointIds().SetNumberOfIds(4) + for ix, iv in enumerate(face): + polygon.GetPointIds().SetId(ix, iv) + polydata.InsertNextCell( + polygon.GetCellType(), polygon.GetPointIds() + ) + + # and then set the hydchr data + vtk_arr = numpy_support.numpy_to_vtk( + num_array=np.array(array), array_type=self.__vtk.VTK_FLOAT + ) + vtk_arr.SetName("hydchr") + polydata.GetCellData().SetScalars(vtk_arr) + self.vtk_polygons = polydata + + def _build_point_scalar_array(self, array): + """ + Method to build a point scalar array using an inverse distance + weighting scheme + + Parameters + ---------- + array : np.ndarray + ndarray of shape nnodes + + Returns + ------- + np.ndarray of shape n vtk points + """ + isint = False + if np.issubdtype(array[0], np.dtype(int)): + isint = True + + if isint: + # maybe think of a way to do ints with the numpy graph, + # looping through the dict is very inefficient + ps_array = np.zeros((len(self.points),), dtype=array.dtype) + for _, value in self._point_scalar_graph.items(): + for ix, pt in enumerate(value["vtk_points"]): + ps_array[pt] = array[value["idx"][ix]] + else: + ps_graph = self._point_scalar_numpy_graph + ps_array = np.where(ps_graph >= 0, array[ps_graph], np.nan) + + # do inverse distance weighting and apply mask to retain + # nan valued cells because numpy returns 0 when all vals are nan + weighted_vals = self._idw_weight_graph * ps_array + mask = np.isnan(weighted_vals).all(axis=0) + weighted_vals = np.nansum(weighted_vals, axis=0) + weighted_vals[mask] = np.nan + ps_array = weighted_vals / self._idw_total_weight_graph + + return ps_array + + def _add_timevalue(self, index, fname): + """ + Method to add a TimeValue to a vtk object, used with transient arrays + + Parameters + ---------- + index : int, tuple + integer representing kper or a tuple of (kstp, kper) + fname : str + path to the vtu file + + """ + if not self.pvd: + return + + try: + timeval = self._totim[index] + except (IndexError, KeyError): + return + + self.pvd.add_timevalue(fname, timeval) + + def _mask_values(self, array, masked_values): + """ + Method to mask values with nan + + Parameters + ---------- + array : np.ndarray + numpy array of values + masked_values : list + values to convert to nan + + Returns + ------- + numpy array + """ + if masked_values is not None: + try: + for mv in masked_values: + array[array == mv] = np.nan + except ValueError: + pass + + return array + + def add_array(self, array, name, masked_values=None, dtype=None): + """ + Method to set an array to the vtk grid + + Parameters + ---------- + array : np.ndarray, list + list of array values to set to the vtk_array + name : str + array name for vtk + masked_values : list, None + list of values to set equal to nan + dtype : vtk datatype object + method to supply and force a vtk datatype + + """ + from vtk.util import numpy_support + + if not self._vtk_geometry_set: + self._set_vtk_grid_geometry() + + array = np.ravel(array) + + if array.size != self.nnodes: + raise AssertionError("array must be the size as the modelgrid") + + if self.idomain is not None: + try: + array[self.idomain == 0] = np.nan + except ValueError: + pass + + array = 
self._mask_values(array, masked_values)
+
+        if not self.binary and not self.xml:
+            # ascii does not properly render nan values
+            array = np.nan_to_num(array, nan=1e30)
+
+        if self.point_scalars:
+            array = self._build_point_scalar_array(array)
+
+        if dtype is None:
+            dtype = self.__vtk.VTK_FLOAT
+            if np.issubdtype(array[0], np.dtype(int)):
+                dtype = self.__vtk.VTK_INT
+
+        vtk_arr = numpy_support.numpy_to_vtk(num_array=array, array_type=dtype)
+        vtk_arr.SetName(name)
+
+        if self.point_scalars:
+            self.vtk_grid.GetPointData().AddArray(vtk_arr)
+        else:
+            self.vtk_grid.GetCellData().AddArray(vtk_arr)
+
+    def add_transient_array(self, d, name=None, masked_values=None):
+        """
+        Method to add transient array data to the vtk object
+
+        Parameters
+        ----------
+        d : dict
+            dictionary of array2d, array3d data or numpy array data
+        name : str, None
+            parameter name, required when user provides a dictionary
+            of numpy arrays
+        masked_values : list, None
+            list of values to set equal to nan
+
+        Returns
+        -------
+
+        """
+        if self.__transient_output_data:
+            raise AssertionError(
+                "Transient arrays cannot be mixed with transient output. "
+                "Please create a separate vtk object for transient package "
+                "data"
+            )
+
+        if not self._vtk_geometry_set:
+            self._set_vtk_grid_geometry()
+
+        k = list(d.keys())[0]
+        transient = dict()
+        if isinstance(d[k], DataInterface):
+            if d[k].data_type in (DataType.array2d, DataType.array3d):
+                if name is None:
+                    name = d[k].name
+                    if isinstance(name, list):
+                        name = name[0]
+
+                for kper, value in d.items():
+                    if value.array.size != self.nnodes:
+                        array = np.zeros(self.nnodes) * np.nan
+                        array[: value.array.size] = np.ravel(value.array)
+                    else:
+                        array = value.array
+
+                    array = self._mask_values(array, masked_values)
+                    transient[kper] = array
+        else:
+            if name is None:
+                raise ValueError(
+                    "name must be specified when providing numpy arrays"
+                )
+            for kper, trarray in d.items():
+                if trarray.size != self.nnodes:
+                    array = np.zeros(self.nnodes) * np.nan
+                    array[: trarray.size] = np.ravel(trarray)
+                else:
+                    array = trarray
+
+                array = self._mask_values(array, masked_values)
+                transient[kper] = array
+
+        for k, v in transient.items():
+            if k not in self.__transient_data:
+                self.__transient_data[k] = {name: v}
+            else:
+                self.__transient_data[k][name] = v
+
+    def add_transient_list(self, mflist, masked_values=None):
+        """
+        Method to add transient list data to a vtk object
+
+        Parameters
+        ----------
+        mflist : flopy.utils.MfList object
+        masked_values : list, None
+            list of values to set equal to nan
+
+        """
+        if not self._vtk_geometry_set:
+            self._set_vtk_grid_geometry()
+
+        pkg_name = mflist.package.name[0]
+        mfl = mflist.array
+        if isinstance(mfl, dict):
+            for arr_name, arr4d in mflist.array.items():
+                d = {kper: array for kper, array in enumerate(arr4d)}
+                name = f"{pkg_name}_{arr_name}"
+                self.add_transient_array(d, name)
+        else:
+            export = {}
+            for kper in range(mflist.package.parent.nper):
+                try:
+                    arr_dict = mflist.to_array(kper, mask=True)
+                except ValueError:
+                    continue
+
+                if arr_dict is None:
+                    continue
+
+                for arr_name, array in arr_dict.items():
+                    if arr_name not in export:
+                        export[arr_name] = {kper: array}
+                    else:
+                        export[arr_name][kper] = array
+
+            for arr_name, d in export.items():
+                name = f"{pkg_name}_{arr_name}"
+                self.add_transient_array(d, name, masked_values=masked_values)
+
+    def add_vector(self, vector, name, masked_values=None):
+        """
+        Method to add vector data to vtk
+
+        Parameters
+        ----------
+        vector : array
+            array of dimension (3, nnodes)
+        name : str
+            name of the vector to be displayed in vtk
+        masked_values : list, None
+            list of values to set equal to nan
+
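+        Examples
+        --------
+        A hypothetical sketch; ``vtk`` is an existing Vtk instance and
+        ``qx``, ``qy``, ``qz`` are placeholder cell-centered flow
+        components of size nnodes:
+
+        >>> import numpy as np
+        >>> vtk.add_vector(np.vstack([qx, qy, qz]), "specific_discharge")
+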
+        """
+        from vtk.util import numpy_support
+
+        if not self._vtk_geometry_set:
+            self._set_vtk_grid_geometry()
+
+        if isinstance(vector, (tuple, list)):
+            vector = np.array(vector)
+
+        if vector.size != 3 * self.nnodes:
+            if vector.size == 3 * self.ncpl:
+                vector = np.reshape(vector, (3, self.ncpl))
+                tv = np.full((3, self.nnodes), np.nan)
+                for ix, q in enumerate(vector):
+                    tv[ix, : self.ncpl] = q
+                vector = tv
+            else:
+                raise AssertionError(
+                    "Size of vector must be 3 * nnodes or 3 * ncpl"
+                )
+        else:
+            vector = np.reshape(vector, (3, self.nnodes))
+
+        if self.point_scalars:
+            tmp = []
+            for v in vector:
+                tmp.append(self._build_point_scalar_array(v))
+            vector = np.array(tmp)
+
+        vector = self._mask_values(vector, masked_values)
+
+        vtk_arr = numpy_support.numpy_to_vtk(
+            num_array=vector, array_type=self.__vtk.VTK_FLOAT
+        )
+        vtk_arr.SetName(name)
+        vtk_arr.SetNumberOfComponents(3)
+
+        if self.point_scalars:
+            self.vtk_grid.GetPointData().SetVectors(vtk_arr)
+        else:
+            self.vtk_grid.GetCellData().SetVectors(vtk_arr)
+
+    def add_transient_vector(self, d, name, masked_values=None):
+        """
+        Method to add transient vector data to vtk
+
+        Parameters
+        ----------
+        d : dict
+            dictionary of vectors
+        name : str
+            name of vector to be displayed in vtk
+        masked_values : list, None
+            list of values to set equal to nan
+
+        """
+        if not self._vtk_geometry_set:
+            self._set_vtk_grid_geometry()
+
+        if self.__transient_data:
+            k = list(self.__transient_data.keys())[0]
+            if len(d) != len(self.__transient_data[k]):
+                print(
+                    "Transient vector is not the same size as the transient "
+                    "arrays; time stamps may be unreliable for vector data "
+                    "in the VTK file"
+                )
+
+        if isinstance(d, dict):
+            cnt = 0
+            for key, value in d.items():
+                if not isinstance(value, np.ndarray):
+                    value = np.array(value)
+
+                if (
+                    value.size != 3 * self.ncpl
+                    and value.size != 3 * self.nnodes
+                ):
+                    raise AssertionError(
+                        "Size of vector must be 3 * nnodes or 3 * ncpl"
+                    )
+
+                value = self._mask_values(value, masked_values)
+                self.__transient_vector[cnt] = {name: value}
+                cnt += 1
+
+    def add_package(self, pkg, masked_values=None):
+        """
+        Method to set all arrays within a package to VTK arrays
+
+        Parameters
+        ----------
+        pkg : flopy.pakbase.Package object
+            flopy package object, example ModflowWel
+        masked_values : list, None
+            list of values to set equal to nan
+
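+        Examples
+        --------
+        A hypothetical sketch; ``ml`` is assumed to be an existing
+        MODFLOW model with an LPF package:
+
+        >>> vtk.add_package(ml.lpf)
+        >>> vtk.write("lpf_arrays.vtu")
+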
"transient_2ds"): + self.add_transient_array( + value.transient_2ds, item, masked_values + ) + else: + d = {ix: i for ix, i in enumerate(value.array)} + self.add_transient_array(d, item, masked_values) + + elif value.data_type == DataType.transient3d: + if value.array is not None: + self.add_transient_array( + value.transient_3ds, item, masked_values + ) + + elif value.data_type == DataType.transientlist: + self.add_transient_list(value, masked_values) + + else: + pass + + def add_model(self, model, selpaklist=None, masked_values=None): + """ + Method to add all array and transient list data from a modflow model + to a timeseries of vtk files + + Parameters + ---------- + model : fp.modflow.ModelInterface + any flopy model object + selpaklist : list, None + optional parameter where the user can supply a list of packages + to export. + masked_values : list, None + list of values to set equal to nan + + """ + for package in model.packagelist: + if selpaklist is not None: + if package.name[0] not in selpaklist: + continue + + self.add_package(package, masked_values) + + def add_pathline_points(self, pathlines, timeseries=False): + """ + Method to add Modpath output from a pathline or timeseries file + to the grid. Colors will be representative of totim. + + Parameters: + ---------- + pathlines : np.recarray or list + pathlines accepts a numpy recarray of a particle pathline or + a list of numpy reccarrays associated with pathlines + + timeseries : bool + method to plot data as a series of vtk timeseries files for + animation or as a single static vtk file. Default is false + + """ + if isinstance(pathlines, (np.recarray, np.ndarray)): + pathlines = [pathlines] + + keys = ["particleid", "time"] + if not timeseries: + arrays = {key: [] for key in keys} + points = [] + for recarray in pathlines: + recarray["z"] *= self.vertical_exageration + for rec in recarray: + points.append(tuple(rec[["x", "y", "z"]])) + for key in keys: + arrays[key].append(rec[key]) + + self._set_modpath_point_data(points, arrays) + + else: + self.vtk_pathlines = self.__vtk.vtkUnstructuredGrid() + timeseries_data = {} + points = {} + for recarray in pathlines: + recarray["z"] *= self.vertical_exageration + for rec in recarray: + time = rec["time"] + if time not in points: + points[time] = [tuple(rec[["x", "y", "z"]])] + t = {key: [] for key in keys} + timeseries_data[time] = t + + else: + points[time].append(tuple(rec[["x", "y", "z"]])) + + for key in keys: + timeseries_data[time][key].append(rec[key]) + + self.__pathline_transient_data = timeseries_data + self._pathline_points = points + + def add_heads(self, hds, kstpkper=None, masked_values=None): + """ + Method to add head data to a vtk file + + Parameters + ---------- + hds : flopy.utils.LayerFile object + Binary or Formatted HeadFile type object + kstpkper : tuple, list of tuples, None + tuple or list of tuples of kstpkper, if kstpkper=None all + records are selected + masked_values : list, None + list of values to set equal to nan + + """ + if not self.__transient_output_data and self.__transient_data: + raise AssertionError( + "Head data cannot be mixed with transient package data, " + "Please create a seperate vtk object for transient head data" + ) + + if kstpkper is None: + kstpkper = hds.get_kstpkper() + elif isinstance(kstpkper, (list, tuple)): + if not isinstance(kstpkper[0], (list, tuple)): + kstpkper = [kstpkper] + else: + pass + + # reset totim based on values read from head file + times = hds.get_times() + kstpkpers = hds.get_kstpkper() + self._totim = 
+        """
+        if not self.__transient_output_data and self.__transient_data:
+            raise AssertionError(
+                "Head data cannot be mixed with transient package data. "
+                "Please create a separate vtk object for transient head data"
+            )
+
+        if kstpkper is None:
+            kstpkper = hds.get_kstpkper()
+        elif isinstance(kstpkper, (list, tuple)):
+            if not isinstance(kstpkper[0], (list, tuple)):
+                kstpkper = [kstpkper]
+        else:
+            pass
+
+        # reset totim based on values read from head file
+        times = hds.get_times()
+        kstpkpers = hds.get_kstpkper()
+        self._totim = {ki: time for (ki, time) in zip(kstpkpers, times)}
+
+        text = hds.text.decode()
+
+        d = dict()
+        for ki in kstpkper:
+            d[ki] = hds.get_data(ki)
+
+        self.__transient_output_data = False
+        self.add_transient_array(d, name=text, masked_values=masked_values)
+        self.__transient_output_data = True
+
+    def add_cell_budget(
+        self, cbc, text=None, kstpkper=None, masked_values=None
+    ):
+        """
+        Method to add cell budget data to vtk
+
+        Parameters
+        ----------
+        cbc : flopy.utils.CellBudgetFile object
+            flopy binary CellBudgetFile object
+        text : str or None
+            The text identifier for the record. Examples include
+            'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. If text=None
+            all compatible records are exported
+        kstpkper : tuple, list of tuples, None
+            tuple or list of tuples of kstpkper, if kstpkper=None all
+            records are selected
+        masked_values : list, None
+            list of values to set equal to nan
+
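+        Examples
+        --------
+        A hypothetical sketch; "model.cbc" is assumed to be an existing
+        binary cell budget file:
+
+        >>> from flopy.utils import CellBudgetFile
+        >>> cbc = CellBudgetFile("model.cbc")
+        >>> vtk.add_cell_budget(cbc, text="FLOW RIGHT FACE")
+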
+        """
+        if not self.__transient_output_data and self.__transient_data:
+            raise AssertionError(
+                "Binary data cannot be mixed with transient package data. "
+                "Please create a separate vtk object for transient budget "
+                "data"
+            )
+
+        records = cbc.get_unique_record_names(decode=True)
+        imeth_dict = {
+            record: imeth for (record, imeth) in zip(records, cbc.imethlist)
+        }
+        if text is None:
+            keylist = records
+        else:
+            if not isinstance(text, list):
+                keylist = [text]
+            else:
+                keylist = text
+
+        if kstpkper is None:
+            kstpkper = cbc.get_kstpkper()
+        elif isinstance(kstpkper, tuple):
+            if not isinstance(kstpkper[0], (list, tuple)):
+                kstpkper = [kstpkper]
+        else:
+            pass
+
+        # reset totim based on values read from budget file
+        times = cbc.get_times()
+        kstpkpers = cbc.get_kstpkper()
+        self._totim = {ki: time for (ki, time) in zip(kstpkpers, times)}
+
+        for name in keylist:
+            d = {}
+            for i, ki in enumerate(kstpkper):
+                try:
+                    array = cbc.get_data(kstpkper=ki, text=name, full3D=True)
+                    if len(array) == 0:
+                        continue
+
+                    array = np.ma.filled(array, np.nan)
+                    if array.size < self.nnodes:
+                        if array.size < self.ncpl:
+                            raise AssertionError(
+                                "Array size must be equal to "
+                                "either ncpl or nnodes"
+                            )
+
+                        # copy into a padded array; do not overwrite the
+                        # data before it has been copied
+                        tmp = np.zeros(self.nnodes) * np.nan
+                        tmp[: array.size] = np.ravel(array)
+                        array = tmp
+
+                except ValueError:
+                    if imeth_dict[name] == 6:
+                        array = np.full((self.nnodes,), np.nan)
+                        rec = cbc.get_data(kstpkper=ki, text=name)[0]
+                        for [node, q] in zip(rec["node"], rec["q"]):
+                            array[node] = q
+                    else:
+                        continue
+
+                d[ki] = array
+
+            self.__transient_output_data = False
+            self.add_transient_array(d, name, masked_values)
+            self.__transient_output_data = True
+
+    def _set_modpath_point_data(self, points, d):
+        """
+        Method to build the vtk point geometry and set arrays for
+        modpath pathlines
+
+        Parameters
+        ----------
+        points : list
+            list of (x, y, z) points
+        d : dict
+            dictionary of numpy arrays to add to vtk
+
+        """
+        from vtk.util import numpy_support
+
+        nverts = len(points)
+
+        self.vtk_pathlines = self.__vtk.vtkUnstructuredGrid()
+
+        vtk_points = self.__vtk.vtkPoints()
+        for point in points:
+            vtk_points.InsertNextPoint(point)
+
+        self.vtk_pathlines.SetPoints(vtk_points)
+
+        # create a Vertex instance for each point and add it to the grid
+        for i in range(nverts):
+            vertex = self.__vtk.vtkPolyVertex()
+            vertex.GetPointIds().SetNumberOfIds(1)
+            vertex.GetPointIds().SetId(0, i)
+
+            # set data to the pathline grid
+            self.vtk_pathlines.InsertNextCell(
+                vertex.GetCellType(), vertex.GetPointIds()
+            )
+
+        # process arrays and add them to the pathline grid
+        for name, array in d.items():
+            array = np.array(array)
+            vtk_array = numpy_support.numpy_to_vtk(
+                num_array=array, array_type=self.__vtk.VTK_FLOAT
+            )
+            vtk_array.SetName(name)
+            self.vtk_pathlines.GetCellData().AddArray(vtk_array)
+
+    def write(self, f, kper=None):
+        """
+        Method to write a vtk file from the VTK object
+
+        Parameters
+        ----------
+        f : str
+            vtk file name
+        kper : int, list, tuple
+            stress period or list of stress periods to write to vtk. This
+            parameter only applies to transient package data.
+
+        """
+        grids = [self.vtk_grid, self.vtk_polygons, self.vtk_pathlines]
+        suffix = ["", "_hfb", "_pathline"]
+
+        extension = ".vtk"
+        if self.pvd:
+            self.pvd = Pvd()
+            extension = ".vtu"
+
+        fpth, _ = os.path.split(f)
+        if not os.path.exists(fpth):
+            os.mkdir(fpth)
+
+        if kper is not None:
+            if isinstance(kper, (int, float)):
+                kper = [int(kper)]
+
+        for ix, grid in enumerate(grids):
+            if grid is None:
+                continue
+
+            if not f.endswith(".vtk") and not f.endswith(".vtu"):
+                foo = f"{f}{suffix[ix]}{extension}"
+            else:
+                foo = f"{f[:-4]}{suffix[ix]}{f[-4:]}"
+
+            if not self.xml:
+                w = self.__vtk.vtkUnstructuredGridWriter()
+                if self.binary:
+                    w.SetFileTypeToBinary()
+            else:
+                w = self.__vtk.vtkXMLUnstructuredGridWriter()
+                if not self.binary:
+                    w.SetDataModeToAscii()
+
+            if self.__pathline_transient_data and ix == 2:
+                stp = 0
+                for time, d in self.__pathline_transient_data.items():
+                    tf = self.__create_transient_vtk_path(foo, stp)
+                    points = self._pathline_points[time]
+                    self._set_modpath_point_data(points, d)
+
+                    w.SetInputData(self.vtk_pathlines)
+                    w.SetFileName(tf)
+                    w.Update()
+                    stp += 1
+
+            else:
+                w.SetInputData(grid)
+
+                if (
+                    self.__transient_data or self.__transient_vector
+                ) and ix == 0:
+                    if self.__transient_data:
+                        cnt = 0
+                        for per, d in self.__transient_data.items():
+                            if kper is not None:
+                                if per not in kper:
+                                    continue
+
+                            if self.__transient_output_data:
+                                tf = self.__create_transient_vtk_path(foo, cnt)
+                            else:
+                                tf = self.__create_transient_vtk_path(foo, per)
+                            self._add_timevalue(per, tf)
+                            for name, array in d.items():
+                                self.add_array(array, name)
+
+                            if per in self.__transient_vector:
+                                d = self.__transient_vector[per]
+                                for name, vector in d.items():
+                                    self.add_vector(vector, name)
+
+                            w.SetFileName(tf)
+                            w.Update()
+                            cnt += 1
+                    else:
+                        cnt = 0
+                        for per, d in self.__transient_vector.items():
+                            if kper is not None:
+                                if per not in kper:
+                                    continue
+
+                            if self.__transient_output_data:
+                                tf = self.__create_transient_vtk_path(foo, cnt)
+                            else:
+                                tf = self.__create_transient_vtk_path(foo, per)
+                            self._add_timevalue(per, tf)
+                            for name, vector in d.items():
+                                self.add_vector(vector, name)
+
+                            w.SetFileName(tf)
+                            w.Update()
+                            cnt += 1
+                else:
+                    w.SetFileName(foo)
+                    w.Update()
+
+        if not isinstance(self.pvd, bool):
+            if not (f.endswith(".vtu") or f.endswith(".vtk")):
+                pvdfile = f"{f}.pvd"
+            else:
+                pvdfile = f"{f[:-4]}.pvd"
+
+            self.pvd.write(pvdfile)
+
+    def __create_transient_vtk_path(self, path, kper):
+        """
+        Method to set naming convention for transient vtk file series
+
+        Parameters
+        ----------
+        path : str
+            vtk file path
+        kper : int
+            zero based stress period number
+
+        Returns
+        -------
+        updated vtk file path of format _{:06d}.vtk where
+        {:06d} represents the zero-padded, six digit stress period number
+        """
+        pth = ".".join(path.split(".")[:-1])
+        if pth.endswith("_"):
+            pth = pth[:-1]
+        extension = path.split(".")[-1]
+        return f"{pth}_{kper:06d}.{extension}"
+
+
+def export_model(
+    model,
+    otfolder,
+    package_names=None,
+    nanval=-1e20,
+    smooth=False,
+    point_scalars=False,
+    vtk_grid_type="auto",
+    true2d=False,
+    binary=True,
+    kpers=None,
+):
+    """
+    DEPRECATED method to export model to vtk
+
+    Parameters
+    ----------
+
+    model : flopy model instance
+        flopy model
+    otfolder : str
+        output folder
+    package_names : list
+        list of package names to be exported
+    nanval : scalar
+        no data value, default value is -1e20
+    smooth : bool
+        if True, will create smooth layer elevations, default is False
+    point_scalars : bool
+        if True, will also output array values at cell vertices, default is
+        False; note this automatically sets smooth to True
+    vtk_grid_type : str
+        Specific vtk_grid_type or 'auto' (default). Possible specific values
+        are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
+        If 'auto', the grid type is automatically determined. Namely:
+            * A regular grid (in all three directions) will be saved as an
+              'ImageData'.
+            * A rectilinear (in all three directions), non-regular grid
+              will be saved as a 'RectilinearGrid'.
+            * Other grids will be saved as 'UnstructuredGrid'.
+    true2d : bool
+        If True, the model is expected to be 2d (1 layer, 1 row or 1 column)
+        and the data will be exported as true 2d data, default is False.
+    binary : bool
+        if True the output file will be binary, default is True
+    kpers : iterable of int
+        Stress periods to export. If None (default), all stress periods will be
+        exported.
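+    Examples
+    --------
+    A hypothetical sketch of this deprecated call; ``ml`` is assumed to
+    be an existing flopy model:
+
+    >>> from flopy.export.vtk import export_model
+    >>> export_model(ml, "vtk_output", kpers=[0])
+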
+    """
+    warnings.warn("export_model is deprecated, please use Vtk.add_model()")
+
+    if nanval != -1e20:
+        warnings.warn("nanval is deprecated, setting to np.nan")
+    if true2d:
+        warnings.warn("true2d is no longer supported, setting to False")
+    if vtk_grid_type != "auto":
+        warnings.warn(
+            "vtk_grid_type is deprecated, exporting as UnstructuredGrid"
+        )
+
+    vtk = Vtk(
+        model,
+        vertical_exageration=1,
+        binary=binary,
+        smooth=smooth,
+        point_scalars=point_scalars,
+    )
+
+    vtk.add_model(model, package_names)
+
+    if not os.path.exists(otfolder):
+        os.mkdir(otfolder)
+
+    name = model.name
+    vtk.write(os.path.join(otfolder, name), kper=kpers)
+
+
+def export_package(
+    pak_model,
+    pak_name,
+    otfolder,
+    vtkobj=None,
+    nanval=-1e20,
+    smooth=False,
+    point_scalars=False,
+    vtk_grid_type="auto",
+    true2d=False,
+    binary=True,
+    kpers=None,
+):
+    """
+    DEPRECATED method to export package to vtk
+
+    Parameters
+    ----------
+
+    pak_model : flopy model instance
+        the model of the package
+    pak_name : str
+        the name of the package
+    otfolder : str
+        output folder to write the data
+    vtkobj : VTK instance
+        a vtk object (allows export_package to be called from
+        export_model)
+    nanval : scalar
+        no data value, default value is -1e20
+    smooth : bool
+        if True, will create smooth layer elevations, default is False
+    point_scalars : bool
+        if True, will also output array values at cell vertices, default is
+        False; note this automatically sets smooth to True
+    vtk_grid_type : str
+        Specific vtk_grid_type or 'auto' (default). Possible specific values
+        are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
+        If 'auto', the grid type is automatically determined. Namely:
+            * A regular grid (in all three directions) will be saved as an
+              'ImageData'.
+            * A rectilinear (in all three directions), non-regular grid
+              will be saved as a 'RectilinearGrid'.
+            * Other grids will be saved as 'UnstructuredGrid'.
+    true2d : bool
+        If True, the model is expected to be 2d (1 layer, 1 row or 1 column)
+        and the data will be exported as true 2d data, default is False.
+    binary : bool
+        if True the output file will be binary, default is True
+    kpers : iterable of int
+        Stress periods to export. If None (default), all stress periods will be
+        exported.
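+    Examples
+    --------
+    A hypothetical sketch of this deprecated call; ``ml`` is assumed to
+    be an existing flopy model with an LPF package:
+
+    >>> from flopy.export.vtk import export_package
+    >>> export_package(ml, "LPF", "vtk_output")
+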
+    """
+    warnings.warn("export_package is deprecated, use Vtk.add_package()")
+
+    if nanval != -1e20:
+        warnings.warn("nanval is deprecated, setting to np.nan")
+    if true2d:
+        warnings.warn("true2d is no longer supported, setting to False")
+    if vtk_grid_type != "auto":
+        warnings.warn(
+            "vtk_grid_type is deprecated, exporting as UnstructuredGrid"
+        )
+
+    if not vtkobj:
+        vtk = Vtk(
+            pak_model,
+            binary=binary,
+            smooth=smooth,
+            point_scalars=point_scalars,
+        )
+    else:
+        vtk = vtkobj
+
+    if not os.path.exists(otfolder):
+        os.mkdir(otfolder)
+
+    if isinstance(pak_name, list):
+        pak_name = pak_name[0]
+
+    p = pak_model.get_package(pak_name)
+    vtk.add_package(p)
+
+    vtk.write(os.path.join(otfolder, pak_name), kper=kpers)
+
+
+def export_transient(
+    model,
+    array,
+    output_folder,
+    name,
+    nanval=-1e20,
+    array2d=False,
+    smooth=False,
+    point_scalars=False,
+    vtk_grid_type="auto",
+    true2d=False,
+    binary=True,
+    kpers=None,
+):
+    """
+    DEPRECATED method to export transient arrays and lists to vtk
+
+    Parameters
+    ----------
+
+    model : MFModel
+        the flopy model instance
+    array : Transient instance
+        flopy transient array
+    output_folder : str
+        output folder to write the data
+    name : str
+        name of array
+    nanval : scalar
+        no data value, default value is -1e20
+    array2d : bool
+        True if array is 2d, default is False
+    smooth : bool
+        if True, will create smooth layer elevations, default is False
+    point_scalars : bool
+        if True, will also output array values at cell vertices, default is
+        False; note this automatically sets smooth to True
+    vtk_grid_type : str
+        Specific vtk_grid_type or 'auto' (default). Possible specific values
+        are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
+        If 'auto', the grid type is automatically determined. Namely:
+            * A regular grid (in all three directions) will be saved as an
+              'ImageData'.
+            * A rectilinear (in all three directions), non-regular grid
+              will be saved as a 'RectilinearGrid'.
+            * Other grids will be saved as 'UnstructuredGrid'.
+    true2d : bool
+        If True, the model is expected to be 2d (1 layer, 1 row or 1 column)
+        and the data will be exported as true 2d data, default is False.
+    binary : bool
+        if True the output file will be binary, default is True
+    kpers : iterable of int
+        Stress periods to export. If None (default), all stress periods will be
+        exported.
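+    Examples
+    --------
+    A hypothetical sketch of this deprecated call; ``ml`` is assumed to
+    be an existing model with a recharge package:
+
+    >>> from flopy.export.vtk import export_transient
+    >>> export_transient(ml, ml.rch.rech, "vtk_output", "recharge")
+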
+    """
+    warnings.warn(
+        "export_transient is deprecated, use Vtk.add_transient_array() or "
+        "Vtk.add_transient_list()"
+    )
+
+    if nanval != -1e20:
+        warnings.warn("nanval is deprecated, setting to np.nan")
+    if true2d:
+        warnings.warn("true2d is no longer supported, setting to False")
+    if vtk_grid_type != "auto":
+        warnings.warn(
+            "vtk_grid_type is deprecated, exporting as UnstructuredGrid"
+        )
+    if array2d:
+        warnings.warn(
+            "array2d parameter is deprecated, 2d arrays are "
+            "handled automatically"
+        )
+
+    if not os.path.exists(output_folder):
+        os.mkdir(output_folder)
+
+    vtk = Vtk(model, binary=binary, smooth=smooth, point_scalars=point_scalars)
+
+    if array.data_type == DataType.transient2d:
+        if array.array is not None:
+            if hasattr(array, "transient_2ds"):
+                vtk.add_transient_array(array.transient_2ds, name)
+            else:
+                d = {ix: i for ix, i in enumerate(array.array)}
+                vtk.add_transient_array(d, name)
+
+    elif array.data_type == DataType.transient3d:
+        if array.array is not None:
+            vtk.add_transient_array(array.transient_3ds, name)
+
+    elif array.data_type == DataType.transientlist:
+        vtk.add_transient_list(array)
+
+    else:
+        raise TypeError(f"type {type(array)} not valid for export_transient")
+
+    vtk.write(os.path.join(output_folder, name), kper=kpers)
+
+
+def export_array(
+    model,
+    array,
+    output_folder,
+    name,
+    nanval=-1e20,
+    array2d=False,
+    smooth=False,
+    point_scalars=False,
+    vtk_grid_type="auto",
+    true2d=False,
+    binary=True,
+):
+    """
+    DEPRECATED method to export array to vtk
+
+    Parameters
+    ----------
+
+    model : flopy model instance
+        the flopy model instance
+    array : flopy array
+        flopy 2d or 3d array
+    output_folder : str
+        output folder to write the data
+    name : str
+        name of array
+    nanval : scalar
+        no data value, default value is -1e20
+    array2d : bool
+        true if the array is 2d and represents the first layer, default is
+        False
+    smooth : bool
+        if True, will create smooth layer elevations, default is False
+    point_scalars : bool
+        if True, will also output array values at cell vertices, default is
+        False; note this automatically sets smooth to True
+    vtk_grid_type : str
+        Specific vtk_grid_type or 'auto' (default). Possible specific values
+        are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
+        If 'auto', the grid type is automatically determined. Namely:
+            * A regular grid (in all three directions) will be saved as an
+              'ImageData'.
+            * A rectilinear (in all three directions), non-regular grid
+              will be saved as a 'RectilinearGrid'.
+            * Other grids will be saved as 'UnstructuredGrid'.
+    true2d : bool
+        If True, the model is expected to be 2d (1 layer, 1 row or 1 column)
+        and the data will be exported as true 2d data, default is False.
+    binary : bool
+        if True the output file will be binary, default is True
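+    Examples
+    --------
+    A hypothetical sketch of this deprecated call; ``ml`` is assumed to
+    be an existing model:
+
+    >>> from flopy.export.vtk import export_array
+    >>> export_array(ml, ml.dis.top.array, "vtk_output", "top")
+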
+    """
+    warnings.warn("export_array is deprecated, please use Vtk.add_array()")
+
+    if nanval != -1e20:
+        warnings.warn("nanval is deprecated, setting to np.nan")
+    if true2d:
+        warnings.warn("true2d is no longer supported, setting to False")
+    if vtk_grid_type != "auto":
+        warnings.warn(
+            "vtk_grid_type is deprecated, exporting as UnstructuredGrid"
+        )
+    if array2d:
+        warnings.warn(
+            "array2d parameter is deprecated, 2d arrays are "
+            "handled automatically"
+        )
+
+    if not os.path.exists(output_folder):
+        os.mkdir(output_folder)
+
+    if array.size < model.modelgrid.nnodes:
+        if array.size < model.modelgrid.ncpl:
+            raise AssertionError(
+                "Array size must be equal to either ncpl or nnodes"
+            )
+
+        # copy into a padded array; do not overwrite the data before it
+        # has been copied
+        tmp = np.zeros(model.modelgrid.nnodes) * np.nan
+        tmp[: array.size] = np.ravel(array)
+        array = tmp
+
+    vtk = Vtk(model, binary=binary, smooth=smooth, point_scalars=point_scalars)
+
+    vtk.add_array(array, name)
+    vtk.write(os.path.join(output_folder, name))
+
+
+def export_heads(
+    model,
+    hdsfile,
+    otfolder,
+    text="head",
+    precision="auto",
+    verbose=False,
+    nanval=-1e20,
+    kstpkper=None,
+    smooth=False,
+    point_scalars=False,
+    vtk_grid_type="auto",
+    true2d=False,
+    binary=True,
+):
+    """
+    DEPRECATED method
+
+    Exports binary head file to vtk
+
+    Parameters
+    ----------
+
+    model : MFModel
+        the flopy model instance
+    hdsfile : str, HeadFile object
+        binary head file path or object
+    otfolder : str
+        output folder to write the data
+    text : string
+        Name of the text string in the head file. Default is 'head'.
+    precision : str
+        Precision of data in the head file: 'auto', 'single' or 'double'.
+        Default is 'auto'.
+    verbose : bool
+        If True, write information to the screen. Default is False.
+    nanval : scalar
+        no data value, default value is -1e20
+    kstpkper : tuple of ints or list of tuple of ints
+        A tuple containing the time step and stress period (kstp, kper).
+        The kstp and kper values are zero based.
+    smooth : bool
+        if True, will create smooth layer elevations, default is False
+    point_scalars : bool
+        if True, will also output array values at cell vertices, default is
+        False; note this automatically sets smooth to True
+    vtk_grid_type : str
+        Specific vtk_grid_type or 'auto' (default). Possible specific values
+        are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
+        If 'auto', the grid type is automatically determined. Namely:
+            * A regular grid (in all three directions) will be saved as an
+              'ImageData'.
+            * A rectilinear (in all three directions), non-regular grid
+              will be saved as a 'RectilinearGrid'.
+            * Other grids will be saved as 'UnstructuredGrid'.
+    true2d : bool
+        If True, the model is expected to be 2d (1 layer, 1 row or 1 column)
+        and the data will be exported as true 2d data, default is False.
+    binary : bool
+        if True the output file will be binary, default is True
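+    Examples
+    --------
+    A hypothetical sketch of this deprecated call; ``ml`` is assumed to
+    be an existing model with head output in "model.hds":
+
+    >>> from flopy.export.vtk import export_heads
+    >>> export_heads(ml, "model.hds", "vtk_output", kstpkper=(0, 0))
+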
+    """
+    warnings.warn("export_heads is deprecated, use Vtk.add_heads()")
+
+    if nanval != -1e20:
+        warnings.warn("nanval is deprecated, setting to np.nan")
+    if true2d:
+        warnings.warn("true2d is no longer supported, setting to False")
+    if vtk_grid_type != "auto":
+        warnings.warn(
+            "vtk_grid_type is deprecated, exporting as UnstructuredGrid"
+        )
+
+    from ..utils import HeadFile
+
+    if not os.path.exists(otfolder):
+        os.mkdir(otfolder)
+
+    if not isinstance(hdsfile, HeadFile):
+        hds = HeadFile(
+            hdsfile, text=text, precision=precision, verbose=verbose
+        )
+    else:
+        hds = hdsfile
+
+    vtk = Vtk(model, binary=binary, smooth=smooth, point_scalars=point_scalars)
+
+    vtk.add_heads(hds, kstpkper)
+    name = f"{model.name}_{text}"
+    vtk.write(os.path.join(otfolder, name))
+
+
+def export_cbc(
+    model,
+    cbcfile,
+    otfolder,
+    precision="single",
+    verbose=False,
+    nanval=-1e20,
+    kstpkper=None,
+    text=None,
+    smooth=False,
+    point_scalars=False,
+    vtk_grid_type="auto",
+    true2d=False,
+    binary=True,
+):
+    """
+    DEPRECATED method
+
+    Exports cell by cell file to vtk
+
+    Parameters
+    ----------
+
+    model : flopy model instance
+        the flopy model instance
+    cbcfile : str
+        the cell by cell file
+    otfolder : str
+        output folder to write the data
+    precision : str
+        Precision of data in the cell by cell file: 'single' or 'double'.
+        Default is 'single'.
+    verbose : bool
+        If True, write information to the screen. Default is False.
+    nanval : scalar
+        no data value
+    kstpkper : tuple of ints or list of tuple of ints
+        A tuple containing the time step and stress period (kstp, kper).
+        The kstp and kper values are zero based.
+    text : str or list of str
+        The text identifier for the record. Examples include
+        'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc.
+    smooth : bool
+        if True, will create smooth layer elevations, default is False
+    point_scalars : bool
+        if True, will also output array values at cell vertices, default is
+        False; note this automatically sets smooth to True
+    vtk_grid_type : str
+        Specific vtk_grid_type or 'auto' (default). Possible specific values
+        are 'ImageData', 'RectilinearGrid', and 'UnstructuredGrid'.
+        If 'auto', the grid type is automatically determined. Namely:
+            * A regular grid (in all three directions) will be saved as an
+              'ImageData'.
+            * A rectilinear (in all three directions), non-regular grid
+              will be saved as a 'RectilinearGrid'.
+            * Other grids will be saved as 'UnstructuredGrid'.
+    true2d : bool
+        If True, the model is expected to be 2d (1 layer, 1 row or 1 column)
+        and the data will be exported as true 2d data, default is False.
+    binary : bool
+        if True the output file will be binary, default is True
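+    Examples
+    --------
+    A hypothetical sketch of this deprecated call; ``ml`` is assumed to
+    be an existing model with budget output in "model.cbc":
+
+    >>> from flopy.export.vtk import export_cbc
+    >>> export_cbc(ml, "model.cbc", "vtk_output", text="FLOW RIGHT FACE")
+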
+    """
+    warnings.warn("export_cbc is deprecated, use Vtk.add_cell_budget()")
+
+    if nanval != -1e20:
+        warnings.warn("nanval is deprecated, setting to np.nan")
+    if true2d:
+        warnings.warn("true2d is no longer supported, setting to False")
+    if vtk_grid_type != "auto":
+        warnings.warn(
+            "vtk_grid_type is deprecated, exporting as UnstructuredGrid"
+        )
+
+    from ..utils import CellBudgetFile
+
+    if not os.path.exists(otfolder):
+        os.mkdir(otfolder)
+
+    if not isinstance(cbcfile, CellBudgetFile):
+        cbc = CellBudgetFile(cbcfile, precision=precision, verbose=verbose)
+    else:
+        cbc = cbcfile
+
+    vtk = Vtk(model, binary=binary, smooth=smooth, point_scalars=point_scalars)
+
+    vtk.add_cell_budget(cbc, text, kstpkper)
+    fname = f"{model.name}_CBC"
+    vtk.write(os.path.join(otfolder, fname))
diff --git a/flopy/mbase.py b/flopy/mbase.py
index 790659ba52..fa13f322ba 100644
--- a/flopy/mbase.py
+++ b/flopy/mbase.py
@@ -16,7 +16,7 @@ from subprocess import Popen, PIPE, STDOUT
 import copy
 import numpy as np
-from flopy import utils, discretization
+from . import utils, discretization
 from .version import __version__
 from .discretization.grid import Grid
@@ -1584,7 +1584,7 @@ def plot(self, SelPackList=None, **kwargs):
 
         >>> ml.plot()
         """
-        from flopy.plot import PlotUtilities
+        from .plot import PlotUtilities
 
         axes = PlotUtilities._plot_model_helper(
             self, SelPackList=SelPackList, **kwargs
diff --git a/flopy/mf6/data/mfdata.py b/flopy/mf6/data/mfdata.py
index 1fc4935b9b..b7d0fdf774 100644
--- a/flopy/mf6/data/mfdata.py
+++ b/flopy/mf6/data/mfdata.py
@@ -344,7 +344,7 @@ def _tas_info(tas_str):
         return None, None
 
     def export(self, f, **kwargs):
-        from flopy.export import utils
+        from ...export import utils
 
         if (
             self.data_type == DataType.array2d
diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py
index cd4a24fa7e..12d64df0a0 100644
--- a/flopy/mf6/data/mfdataarray.py
+++ b/flopy/mf6/data/mfdataarray.py
@@ -1403,7 +1403,7 @@ def plot(
             Empty list is returned if filename_base is not None. Otherwise
             a list of matplotlib.pyplot.axis is returned.
         """
-        from flopy.plot import PlotUtilities
+        from ...plot import PlotUtilities
 
         if not self.plottable:
             raise TypeError(
@@ -1902,7 +1902,7 @@ def plot(
             Empty list is returned if filename_base is not None. Otherwise
             a list of matplotlib.pyplot.axis is returned.
         """
-        from flopy.plot.plotutil import PlotUtilities
+        from ...plot.plotutil import PlotUtilities
 
         if not self.plottable:
             raise TypeError("Simulation level packages are not plottable")
diff --git a/flopy/mf6/data/mfdatalist.py b/flopy/mf6/data/mfdatalist.py
index 02cadf92a9..9ad5ad52ab 100644
--- a/flopy/mf6/data/mfdatalist.py
+++ b/flopy/mf6/data/mfdatalist.py
@@ -1347,7 +1347,7 @@ def plot(
             Empty list is returned if filename_base is not None. Otherwise
             a list of matplotlib.pyplot.axis is returned.
         """
-        from flopy.plot import PlotUtilities
+        from ...plot import PlotUtilities
 
         if not self.plottable:
             raise TypeError("Simulation level packages are not plottable")
@@ -1930,7 +1930,7 @@ def plot(
             Empty list is returned if filename_base is not None. Otherwise
             a list of matplotlib.pyplot.axis is returned.
         
""" - from flopy.plot import PlotUtilities + from ...plot import PlotUtilities if not self.plottable: raise TypeError("Simulation level packages are not plottable") diff --git a/flopy/mf6/data/mfdatascalar.py b/flopy/mf6/data/mfdatascalar.py index ee0fd8263d..fa509307fa 100644 --- a/flopy/mf6/data/mfdatascalar.py +++ b/flopy/mf6/data/mfdatascalar.py @@ -674,7 +674,7 @@ def plot(self, filename_base=None, file_extension=None, **kwargs): Returns: axes: list matplotlib.axes object """ - from flopy.plot.plotutil import PlotUtilities + from ...plot.plotutil import PlotUtilities if not self.plottable: raise TypeError("Scalar values are not plottable") @@ -967,7 +967,7 @@ def plot( Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. """ - from flopy.plot.plotutil import PlotUtilities + from ...plot.plotutil import PlotUtilities if not self.plottable: raise TypeError("Simulation level packages are not plottable") diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 748034942e..828d609c5a 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -16,7 +16,7 @@ from ..discretization.vertexgrid import VertexGrid from ..discretization.unstructuredgrid import UnstructuredGrid from ..discretization.grid import Grid -from flopy.discretization.modeltime import ModelTime +from ..discretization.modeltime import ModelTime from ..mbase import ModelInterface from .utils.mfenums import DiscretizationType from .data import mfstructure @@ -1871,7 +1871,7 @@ def plot(self, SelPackList=None, **kwargs): Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis are returned. """ - from flopy.plot.plotutil import PlotUtilities + from ..plot.plotutil import PlotUtilities axes = PlotUtilities._plot_model_helper( self, SelPackList=SelPackList, **kwargs diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 10c93cd00e..a9fa93fb66 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -2868,7 +2868,7 @@ def export(self, f, **kwargs): None or Netcdf object """ - from flopy import export + from .. import export return export.utils.package_export(f, self, **kwargs) @@ -2903,7 +2903,7 @@ def plot(self, **kwargs): a list of matplotlib.pyplot.axis are returned. 
""" - from flopy.plot.plotutil import PlotUtilities + from ..plot.plotutil import PlotUtilities if not self.plottable: raise TypeError("Simulation level packages are not plottable") diff --git a/flopy/mf6/modflow/mfsimulation.py b/flopy/mf6/modflow/mfsimulation.py index 9d78701531..4877597d6e 100644 --- a/flopy/mf6/modflow/mfsimulation.py +++ b/flopy/mf6/modflow/mfsimulation.py @@ -2456,7 +2456,7 @@ def plot(self, model_list=None, SelPackList=None, **kwargs): matplotlib.pyplot.axes objects """ - from flopy.plot.plotutil import PlotUtilities + from ...plot.plotutil import PlotUtilities axes = PlotUtilities._plot_simulation_helper( self, model_list=model_list, SelPackList=SelPackList, **kwargs diff --git a/flopy/mf6/utils/createpackages.py b/flopy/mf6/utils/createpackages.py index 45b688c121..3924255447 100644 --- a/flopy/mf6/utils/createpackages.py +++ b/flopy/mf6/utils/createpackages.py @@ -3,6 +3,8 @@ import datetime import textwrap from enum import Enum + +# keep below as absolute imports from flopy.mf6.data import mfstructure, mfdatautil from flopy.utils import datautil diff --git a/flopy/modflow/mf.py b/flopy/modflow/mf.py index 63eaae4b96..f5f4bc13f8 100644 --- a/flopy/modflow/mf.py +++ b/flopy/modflow/mf.py @@ -14,7 +14,7 @@ from ..discretization.structuredgrid import StructuredGrid from ..discretization.unstructuredgrid import UnstructuredGrid from ..discretization.grid import Grid -from flopy.discretization.modeltime import ModelTime +from ..discretization.modeltime import ModelTime from .mfpar import ModflowPar diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py index cddb6e3513..afbc6f623a 100644 --- a/flopy/modflow/mfsfr2.py +++ b/flopy/modflow/mfsfr2.py @@ -2061,8 +2061,8 @@ def write_file(self, filename=None): def export(self, f, **kwargs): if isinstance(f, str) and f.lower().endswith(".shp"): - from flopy.utils.geometry import Polygon - from flopy.export.shapefile_utils import recarray2shp + from ..utils.geometry import Polygon + from ..export.shapefile_utils import recarray2shp geoms = [] for ix, i in enumerate(self.reach_data.i): @@ -2072,7 +2072,7 @@ def export(self, f, **kwargs): geoms.append(Polygon(verts)) recarray2shp(self.reach_data, geoms, shpname=f, **kwargs) else: - from flopy import export + from .. import export return export.utils.package_export(f, self, **kwargs) @@ -2083,8 +2083,8 @@ def export_linkages(self, f, **kwargs): reaches can be used to filter for the longest connections in a GIS. """ - from flopy.utils.geometry import LineString - from flopy.export.shapefile_utils import recarray2shp + from ..utils.geometry import LineString + from ..export.shapefile_utils import recarray2shp rd = self.reach_data.copy() m = self.parent @@ -2126,8 +2126,8 @@ def export_outlets(self, f, **kwargs): the model (outset=0). 
""" - from flopy.utils.geometry import Point - from flopy.export.shapefile_utils import recarray2shp + from ..utils.geometry import Point + from ..export.shapefile_utils import recarray2shp rd = self.reach_data if np.min(rd.outreach) == np.max(rd.outreach): @@ -2160,8 +2160,8 @@ def export_transient_variable(self, f, varname, **kwargs): Variable in SFR Package dataset 6a (see SFR package documentation) """ - from flopy.utils.geometry import Point - from flopy.export.shapefile_utils import recarray2shp + from ..utils.geometry import Point + from ..export.shapefile_utils import recarray2shp rd = self.reach_data if np.min(rd.outreach) == np.max(rd.outreach): diff --git a/flopy/modflow/mfuzf1.py b/flopy/modflow/mfuzf1.py index 91e21abf18..dc009f0113 100644 --- a/flopy/modflow/mfuzf1.py +++ b/flopy/modflow/mfuzf1.py @@ -1,4 +1,4 @@ -""" +""" mfuzf1 module. Contains the ModflowUzf1 class. Note that the user can access the ModflowUzf1 class as `flopy.modflow.ModflowUzf1`. diff --git a/flopy/mt3d/mt.py b/flopy/mt3d/mt.py index 693bea7503..6a8b37dad8 100644 --- a/flopy/mt3d/mt.py +++ b/flopy/mt3d/mt.py @@ -15,7 +15,7 @@ from .mtsft import Mt3dSft from .mtlkt import Mt3dLkt from ..discretization.structuredgrid import StructuredGrid -from flopy.discretization.modeltime import ModelTime +from ..discretization.modeltime import ModelTime class Mt3dList(Package): diff --git a/flopy/pakbase.py b/flopy/pakbase.py index f0a6bd6b6b..65c74fd4f8 100644 --- a/flopy/pakbase.py +++ b/flopy/pakbase.py @@ -676,13 +676,13 @@ def export(self, f, **kwargs): None or Netcdf object """ - from flopy import export + from . import export return export.utils.package_export(f, self, **kwargs) def _generate_heading(self): """Generate heading.""" - from flopy import __version__ + from . import __version__ parent = self.parent self.heading = ( @@ -821,7 +821,7 @@ def plot(self, **kwargs): >>> ml.dis.plot() """ - from flopy.plot import PlotUtilities + from .plot import PlotUtilities if not self.plottable: raise TypeError(f"Package {self.name} is not plottable") diff --git a/flopy/seawat/swt.py b/flopy/seawat/swt.py index fc44f3ad80..2ad41c879d 100644 --- a/flopy/seawat/swt.py +++ b/flopy/seawat/swt.py @@ -6,7 +6,7 @@ from .swtvdf import SeawatVdf from .swtvsc import SeawatVsc from ..discretization.structuredgrid import StructuredGrid -from flopy.discretization.modeltime import ModelTime +from ..discretization.modeltime import ModelTime class SeawatList(Package): diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index 04749dca0d..eb08c07d17 100755 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -382,7 +382,7 @@ def plot( ).transpose() ).transpose() - from flopy.plot.plotutil import PlotUtilities + from ..plot.plotutil import PlotUtilities return PlotUtilities._plot_array_helper( plotarray, diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py index 71127471f8..b7675c85d1 100644 --- a/flopy/utils/util_array.py +++ b/flopy/utils/util_array.py @@ -653,7 +653,7 @@ def plottable(self): return True def export(self, f, **kwargs): - from flopy import export + from .. 
import export return export.utils.array3d_export(f, self, **kwargs) @@ -726,7 +726,7 @@ def plot( >>> ml.lpf.hk.plot() """ - from flopy.plot import PlotUtilities + from ..plot import PlotUtilities axes = PlotUtilities._plot_util3d_helper( self, @@ -1530,7 +1530,7 @@ def plot( >>> ml.rch.rech.plot() """ - from flopy.plot import PlotUtilities + from ..plot import PlotUtilities axes = PlotUtilities._plot_transient2d_helper( self, @@ -1586,7 +1586,7 @@ def array(self): return arr def export(self, f, **kwargs): - from flopy import export + from .. import export return export.utils.transient2d_export(f, self, **kwargs) @@ -1984,7 +1984,7 @@ def plot( >>> ml.dis.top.plot() """ - from flopy.plot import PlotUtilities + from ..plot import PlotUtilities axes = PlotUtilities._plot_util2d_helper( self, @@ -1997,7 +1997,7 @@ def plot( return axes def export(self, f, **kwargs): - from flopy import export + from .. import export return export.utils.array2d_export(f, self, **kwargs) diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py index 5b9d781711..7121119aa9 100644 --- a/flopy/utils/util_list.py +++ b/flopy/utils/util_list.py @@ -123,7 +123,7 @@ def get_empty(self, ncell=0): return d def export(self, f, **kwargs): - from flopy import export + from .. import export return export.utils.mflist_export(f, self, **kwargs) @@ -943,7 +943,7 @@ def plot( """ - from flopy.plot import PlotUtilities + from ..plot import PlotUtilities axes = PlotUtilities._plot_mflist_helper( self, diff --git a/flopy/utils/zonbud.py b/flopy/utils/zonbud.py index ed7bcde029..d5fc662817 100644 --- a/flopy/utils/zonbud.py +++ b/flopy/utils/zonbud.py @@ -1,3161 +1,3161 @@ -import os -import copy -import numpy as np -from itertools import groupby -from .utils_def import totim_to_datetime -from . import import_optional_dependency - - -class ZoneBudget: - """ - ZoneBudget class - - Parameters - ---------- - cbc_file : str or CellBudgetFile object - The file name or CellBudgetFile object for which budgets will be - computed. - z : ndarray - The array containing to zones to be used. - kstpkper : tuple of ints - A tuple containing the time step and stress period (kstp, kper). - The kstp and kper values are zero based. - totim : float - The simulation time. - aliases : dict - A dictionary with key, value pairs of zones and aliases. Replaces - the corresponding record and field names with the aliases provided. - When using this option in conjunction with a list of zones, the - zone(s) passed may either be all strings (aliases), all integers, - or mixed. - - Returns - ------- - None - - Examples - -------- - - >>> from flopy.utils.zonbud import ZoneBudget - >>> zon = ZoneBudget.read_zone_file('zone_input_file') - >>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0)) - >>> zb.to_csv('zonebudtest.csv') - >>> zb_mgd = zb * 7.48052 / 1000000 - """ - - def __init__( - self, - cbc_file, - z, - kstpkper=None, - totim=None, - aliases=None, - verbose=False, - **kwargs, - ): - from .binaryfile import CellBudgetFile - - if isinstance(cbc_file, CellBudgetFile): - self.cbc = cbc_file - elif isinstance(cbc_file, str) and os.path.isfile(cbc_file): - self.cbc = CellBudgetFile(cbc_file) - else: - raise Exception(f"Cannot load cell budget file: {cbc_file}.") - - if isinstance(z, np.ndarray): - assert np.issubdtype( - z.dtype, np.integer - ), "Zones dtype must be integer" - else: - e = ( - "Please pass zones as a numpy ndarray of (positive)" - " integers. 
{}".format(z.dtype) - ) - raise Exception(e) - - # Check for negative zone values - if np.any(z < 0): - raise Exception( - "Negative zone value(s) found:", np.unique(z[z < 0]) - ) - - self.dis = None - if "model" in kwargs.keys(): - self.model = kwargs.pop("model") - self.dis = self.model.dis - if "dis" in kwargs.keys(): - self.dis = kwargs.pop("dis") - if len(kwargs.keys()) > 0: - args = ",".join(kwargs.keys()) - raise Exception(f"LayerFile error: unrecognized kwargs: {args}") - - # Check the shape of the cbc budget file arrays - self.cbc_shape = self.cbc.get_data(idx=0, full3D=True)[0].shape - self.nlay, self.nrow, self.ncol = self.cbc_shape - self.cbc_times = self.cbc.get_times() - self.cbc_kstpkper = self.cbc.get_kstpkper() - self.kstpkper = None - self.totim = None - - if kstpkper is not None: - if isinstance(kstpkper, tuple): - kstpkper = [kstpkper] - for kk in kstpkper: - s = f"The specified time step/stress period does not exist {kk}" - assert kk in self.cbc.get_kstpkper(), s - self.kstpkper = kstpkper - elif totim is not None: - if isinstance(totim, float): - totim = [totim] - elif isinstance(totim, int): - totim = [float(totim)] - for t in totim: - s = f"The specified simulation time does not exist {t}" - assert t in self.cbc.get_times(), s - self.totim = totim - else: - # No time step/stress period or simulation time pass - self.kstpkper = self.cbc.get_kstpkper() - - # Set float and integer types - self.float_type = np.float32 - self.int_type = np.int32 - - # Check dimensions of input zone array - s = ( - "Row/col dimensions of zone array {}" - " do not match model row/col dimensions {}".format( - z.shape, self.cbc_shape - ) - ) - assert z.shape[-2] == self.nrow and z.shape[-1] == self.ncol, s - - if z.shape == self.cbc_shape: - izone = z.copy() - elif len(z.shape) == 2: - izone = np.zeros(self.cbc_shape, self.int_type) - izone[:] = z[:, :] - elif len(z.shape) == 3 and z.shape[0] == 1: - izone = np.zeros(self.cbc_shape, self.int_type) - izone[:] = z[0, :, :] - else: - e = f"Shape of the zone array is not recognized: {z.shape}" - raise Exception(e) - - self.izone = izone - self.allzones = np.unique(izone) - self._zonenamedict = {z: f"ZONE_{z}" for z in self.allzones} - - if aliases is not None: - s = ( - "Input aliases not recognized. Please pass a dictionary " - "with key,value pairs of zone/alias." - ) - assert isinstance(aliases, dict), s - # Replace the relevant field names (ignore zone 0) - seen = [] - for z, a in iter(aliases.items()): - if z != 0 and z in self._zonenamedict.keys(): - if z in seen: - raise Exception( - "Zones may not have more than 1 alias." - ) - self._zonenamedict[z] = "_".join(a.split()) - seen.append(z) - - # self._iflow_recnames = self._get_internal_flow_record_names() - - # All record names in the cell-by-cell budget binary file - self.record_names = [ - n.strip() for n in self.cbc.get_unique_record_names(decode=True) - ] - - # Get imeth for each record in the CellBudgetFile record list - self.imeth = {} - for record in self.cbc.recordarray: - self.imeth[record["text"].strip().decode("utf-8")] = record[ - "imeth" - ] - - # INTERNAL FLOW TERMS ARE USED TO CALCULATE FLOW BETWEEN ZONES. - # CONSTANT-HEAD TERMS ARE USED TO IDENTIFY WHERE CONSTANT-HEAD CELLS - # ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF FLOW. - # SWIADDTO--- terms are used by the SWI2 groundwater flow process. 
- internal_flow_terms = [ - "CONSTANT HEAD", - "FLOW RIGHT FACE", - "FLOW FRONT FACE", - "FLOW LOWER FACE", - "SWIADDTOCH", - "SWIADDTOFRF", - "SWIADDTOFFF", - "SWIADDTOFLF", - ] - - # Source/sink/storage term record names - # These are all of the terms that are not related to constant - # head cells or face flow terms - self.ssst_record_names = [ - n for n in self.record_names if n not in internal_flow_terms - ] - - # Initialize budget recordarray - array_list = [] - if self.kstpkper is not None: - for kk in self.kstpkper: - recordarray = self._initialize_budget_recordarray( - kstpkper=kk, totim=None - ) - array_list.append(recordarray) - elif self.totim is not None: - for t in self.totim: - recordarray = self._initialize_budget_recordarray( - kstpkper=None, totim=t - ) - array_list.append(recordarray) - self._budget = np.concatenate(array_list, axis=0) - - # Update budget record array - if self.kstpkper is not None: - for kk in self.kstpkper: - if verbose: - s = ( - "Computing the budget for" - " time step {} in stress period {}".format( - kk[0] + 1, kk[1] + 1 - ) - ) - print(s) - self._compute_budget(kstpkper=kk) - elif self.totim is not None: - for t in self.totim: - if verbose: - s = f"Computing the budget for time {t}" - print(s) - self._compute_budget(totim=t) - - def _compute_budget(self, kstpkper=None, totim=None): - """ - Creates a budget for the specified zone array. This function only - supports the use of a single time step/stress period or time. - - Parameters - ---------- - kstpkper : tuple - Tuple of kstp and kper to compute budget for (default is None). - totim : float - Totim to compute budget for (default is None). - - Returns - ------- - None - - """ - # Initialize an array to track where the constant head cells - # are located. - ich = np.zeros(self.cbc_shape, self.int_type) - swiich = np.zeros(self.cbc_shape, self.int_type) - - if "CONSTANT HEAD" in self.record_names: - """ - C-----CONSTANT-HEAD FLOW -- DON'T ACCUMULATE THE CELL-BY-CELL VALUES FOR - C-----CONSTANT-HEAD FLOW BECAUSE THEY MAY INCLUDE PARTIALLY CANCELING - C-----INS AND OUTS. USE CONSTANT-HEAD TERM TO IDENTIFY WHERE CONSTANT- - C-----HEAD CELLS ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF - C-----FLOW. STORE CONSTANT-HEAD LOCATIONS IN ICH ARRAY. 
- """ - chd = self.cbc.get_data( - text="CONSTANT HEAD", - full3D=True, - kstpkper=kstpkper, - totim=totim, - )[0] - ich[np.ma.where(chd != 0.0)] = 1 - if "FLOW RIGHT FACE" in self.record_names: - self._accumulate_flow_frf("FLOW RIGHT FACE", ich, kstpkper, totim) - if "FLOW FRONT FACE" in self.record_names: - self._accumulate_flow_fff("FLOW FRONT FACE", ich, kstpkper, totim) - if "FLOW LOWER FACE" in self.record_names: - self._accumulate_flow_flf("FLOW LOWER FACE", ich, kstpkper, totim) - if "SWIADDTOCH" in self.record_names: - swichd = self.cbc.get_data( - text="SWIADDTOCH", full3D=True, kstpkper=kstpkper, totim=totim - )[0] - swiich[swichd != 0] = 1 - if "SWIADDTOFRF" in self.record_names: - self._accumulate_flow_frf("SWIADDTOFRF", swiich, kstpkper, totim) - if "SWIADDTOFFF" in self.record_names: - self._accumulate_flow_fff("SWIADDTOFFF", swiich, kstpkper, totim) - if "SWIADDTOFLF" in self.record_names: - self._accumulate_flow_flf("SWIADDTOFLF", swiich, kstpkper, totim) - - # NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE - # ACCUMULATE THE FLOW BY ZONE - # iterate over remaining items in the list - for recname in self.ssst_record_names: - self._accumulate_flow_ssst(recname, kstpkper, totim) - - # Compute mass balance terms - self._compute_mass_balance(kstpkper, totim) - - return - - def _add_empty_record( - self, recordarray, recname, kstpkper=None, totim=None - ): - """ - Build an empty records based on the specified flow direction and - record name for the given list of zones. - - Parameters - ---------- - recordarray : - recname : - kstpkper : tuple - Tuple of kstp and kper to compute budget for (default is None). - totim : float - Totim to compute budget for (default is None). - - Returns - ------- - recordarray : np.recarray - - """ - if kstpkper is not None: - if len(self.cbc_times) > 0: - totim = self.cbc_times[self.cbc_kstpkper.index(kstpkper)] - else: - totim = 0.0 - elif totim is not None: - if len(self.cbc_times) > 0: - kstpkper = self.cbc_kstpkper[self.cbc_times.index(totim)] - else: - kstpkper = (0, 0) - - row = [totim, kstpkper[0], kstpkper[1], recname] - row += [0.0 for _ in self._zonenamedict.values()] - recs = np.array(tuple(row), dtype=recordarray.dtype) - recordarray = np.append(recordarray, recs) - return recordarray - - def _initialize_budget_recordarray(self, kstpkper=None, totim=None): - """ - Initialize the budget record array which will store all of the - fluxes in the cell-budget file. - - Parameters - ---------- - kstpkper : tuple - Tuple of kstp and kper to compute budget for (default is None). - totim : float - Totim to compute budget for (default is None). - - Returns - ------- - - """ - - # Create empty array for the budget terms. - dtype_list = [ - ("totim", "= 2: - data = self.cbc.get_data( - text=recname, kstpkper=kstpkper, totim=totim - )[0] - - # "FLOW RIGHT FACE" COMPUTE FLOW BETWEEN ZONES ACROSS COLUMNS. - # COMPUTE FLOW ONLY BETWEEN A ZONE AND A HIGHER ZONE -- FLOW FROM - # ZONE 4 TO 3 IS THE NEGATIVE OF FLOW FROM 3 TO 4. 
- # 1ST, CALCULATE FLOW BETWEEN NODE J,I,K AND J-1,I,K - - k, i, j = np.where( - self.izone[:, :, 1:] > self.izone[:, :, :-1] - ) - - # Adjust column values to account for the starting position of "nz" - j += 1 - - # Define the zone to which flow is going - nz = self.izone[k, i, j] - - # Define the zone from which flow is coming - jl = j - 1 - nzl = self.izone[k, i, jl] - - # Get the face flow - q = data[k, i, jl] - - # Get indices where flow face values are positive (flow out of higher zone) - # Don't include CH to CH flow (can occur if CHTOCH option is used) - # Create an iterable tuple of (from zone, to zone, flux) - # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - # Get indices where flow face values are negative (flow into higher zone) - # Don't include CH to CH flow (can occur if CHTOCH option is used) - # Create an iterable tuple of (from zone, to zone, flux) - # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - # FLOW BETWEEN NODE J,I,K AND J+1,I,K - k, i, j = np.where( - self.izone[:, :, :-1] > self.izone[:, :, 1:] - ) - - # Define the zone from which flow is coming - nz = self.izone[k, i, j] - - # Define the zone to which flow is going - jr = j + 1 - nzr = self.izone[k, i, jr] - - # Get the face flow - q = data[k, i, j] - - # Get indices where flow face values are positive (flow out of higher zone) - # Don't include CH to CH flow (can occur if CHTOCH option is used) - # Create an iterable tuple of (from zone, to zone, flux) - # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - # Get indices where flow face values are negative (flow into higher zone) - # Don't include CH to CH flow (can occur if CHTOCH option is used) - # Create an iterable tuple of (from zone, to zone, flux) - # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) - k, i, j = k[j > 0], i[j > 0], j[j > 0] - jl = j - 1 - nzl = self.izone[k, i, jl] - nz = self.izone[k, i, j] - q = data[k, i, jl] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) - fz = ["TO_CONSTANT_HEAD"] * len(tzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) - fz = ["FROM_CONSTANT_HEAD"] * len(fzi) - tz = [self._zonenamedict[z] for z in tzi[tzi != 0]] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - k, i, 
j = np.where(ich == 1) - k, i, j = ( - k[j < self.ncol - 1], - i[j < self.ncol - 1], - j[j < self.ncol - 1], - ) - nz = self.izone[k, i, j] - jr = j + 1 - nzr = self.izone[k, i, jr] - q = data[k, i, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) - fz = ["FROM_CONSTANT_HEAD"] * len(tzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) - fz = ["TO_CONSTANT_HEAD"] * len(fzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - except Exception as e: - print(e) - raise - return - - def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): - """ - - Parameters - ---------- - recname - ich - kstpkper - totim - - Returns - ------- - - """ - try: - if self.nrow >= 2: - data = self.cbc.get_data( - text=recname, kstpkper=kstpkper, totim=totim - )[0] - - # "FLOW FRONT FACE" - # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K - k, i, j = np.where( - self.izone[:, 1:, :] < self.izone[:, :-1, :] - ) - i += 1 - ia = i - 1 - nza = self.izone[k, ia, j] - nz = self.izone[k, i, j] - q = data[k, ia, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K. 
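A toy demonstration (my own example, not from the patch) of the interface test these face-flow accumulators rely on: compare izone against a shifted copy of itself and keep cells where the zone number increases across the face. The izone array is invented:

import numpy as np

izone = np.array([[[1, 1, 2, 2],
                   [1, 3, 3, 2]]])

# right-face interfaces: zone in column j+1 greater than zone in column j
k, i, j = np.where(izone[:, :, 1:] > izone[:, :, :-1])
j += 1   # shift indices back to the higher-zone column

for kk, ii, jj in zip(k, i, j):
    print(f"face between zone {izone[kk, ii, jj - 1]} "
          f"and zone {izone[kk, ii, jj]} at (k={kk}, i={ii}, j={jj})")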
- k, i, j = np.where( - self.izone[:, :-1, :] < self.izone[:, 1:, :] - ) - nz = self.izone[k, i, j] - ib = i + 1 - nzb = self.izone[k, ib, j] - q = data[k, i, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) - k, i, j = k[i > 0], i[i > 0], j[i > 0] - ia = i - 1 - nza = self.izone[k, ia, j] - nz = self.izone[k, i, j] - q = data[k, ia, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) - fz = ["TO_CONSTANT_HEAD"] * len(tzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) - fz = ["FROM_CONSTANT_HEAD"] * len(fzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - k, i, j = np.where(ich == 1) - k, i, j = ( - k[i < self.nrow - 1], - i[i < self.nrow - 1], - j[i < self.nrow - 1], - ) - nz = self.izone[k, i, j] - ib = i + 1 - nzb = self.izone[k, ib, j] - q = data[k, i, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) - fz = ["FROM_CONSTANT_HEAD"] * len(tzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) - fz = ["TO_CONSTANT_HEAD"] * len(fzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - except Exception as e: - print(e) - raise - return - - def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): - """ - - Parameters - ---------- - recname - ich - kstpkper - totim - - Returns - ------- - - """ - try: - if self.nlay >= 2: - data = self.cbc.get_data( - text=recname, kstpkper=kstpkper, totim=totim - )[0] - - # "FLOW LOWER FACE" - # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1 - k, i, j = np.where( - self.izone[1:, :, :] < self.izone[:-1, :, :] - ) - k += 1 - ka = k - 1 - nza = self.izone[ka, i, j] - nz = self.izone[k, i, j] - q = data[ka, i, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1 - k, i, j = np.where( - self.izone[:-1, :, :] < self.izone[1:, :, :] - ) - nz = self.izone[k, i, j] - kb = k + 1 - nzb = self.izone[kb, i, j] - q = data[k, i, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) - fzi, tzi, fi = 
sum_flux_tuples(nz[idx], nzb[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) - fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) - self._update_budget_fromfaceflow( - fzi, tzi, np.abs(fi), kstpkper, totim - ) - - # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) - k, i, j = k[k > 0], i[k > 0], j[k > 0] - ka = k - 1 - nza = self.izone[ka, i, j] - nz = self.izone[k, i, j] - q = data[ka, i, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) - fz = ["TO_CONSTANT_HEAD"] * len(tzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) - fz = ["FROM_CONSTANT_HEAD"] * len(fzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - k, i, j = np.where(ich == 1) - k, i, j = ( - k[k < self.nlay - 1], - i[k < self.nlay - 1], - j[k < self.nlay - 1], - ) - nz = self.izone[k, i, j] - kb = k + 1 - nzb = self.izone[kb, i, j] - q = data[k, i, j] - idx = np.where( - (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) - fz = ["FROM_CONSTANT_HEAD"] * len(tzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - idx = np.where( - (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) - fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) - fz = ["TO_CONSTANT_HEAD"] * len(fzi) - tz = [self._zonenamedict[z] for z in tzi] - self._update_budget_fromssst( - fz, tz, np.abs(f), kstpkper, totim - ) - - except Exception as e: - print(e) - raise - return - - def _accumulate_flow_ssst(self, recname, kstpkper, totim): - - # NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE - # ACCUMULATE THE FLOW BY ZONE - - imeth = self.imeth[recname] - - data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim) - if len(data) == 0: - # Empty data, can occur during the first time step of a transient - # model when storage terms are zero and not in the cell-budget - # file. 
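For illustration only (not patch content): the imeth == 2/5 branch that follows scatters LIST-format (node, q) pairs into flattened inflow/outflow arrays by 1-based node number. A self-contained sketch with invented dimensions and data:

import numpy as np

nlay, nrow, ncol = 1, 2, 3
nodes = np.array([1, 4, 4, 6])          # 1-based node numbers
q = np.array([5.0, -2.0, 3.0, -1.0])

qin = np.zeros(nlay * nrow * ncol)
qout = np.zeros(nlay * nrow * ncol)
for node, flux in zip(nodes, q):
    idx = node - 1                      # convert to 0-based index
    if flux > 0:
        qin[idx] += flux
    elif flux < 0:
        qout[idx] += flux

qin = qin.reshape(nlay, nrow, ncol)
qout = qout.reshape(nlay, nrow, ncol)
print(qin, qout, sep="\n")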
- return - else: - data = data[0] - - if imeth == 2 or imeth == 5: - # LIST - qin = np.ma.zeros( - (self.nlay * self.nrow * self.ncol), self.float_type - ) - qout = np.ma.zeros( - (self.nlay * self.nrow * self.ncol), self.float_type - ) - for [node, q] in zip(data["node"], data["q"]): - idx = node - 1 - if q > 0: - qin.data[idx] += q - elif q < 0: - qout.data[idx] += q - qin = np.ma.reshape(qin, (self.nlay, self.nrow, self.ncol)) - qout = np.ma.reshape(qout, (self.nlay, self.nrow, self.ncol)) - elif imeth == 0 or imeth == 1: - # FULL 3-D ARRAY - qin = np.ma.zeros(self.cbc_shape, self.float_type) - qout = np.ma.zeros(self.cbc_shape, self.float_type) - qin[data > 0] = data[data > 0] - qout[data < 0] = data[data < 0] - elif imeth == 3: - # 1-LAYER ARRAY WITH LAYER INDICATOR ARRAY - rlay, rdata = data[0], data[1] - data = np.ma.zeros(self.cbc_shape, self.float_type) - for (r, c), l in np.ndenumerate(rlay): - data[l - 1, r, c] = rdata[r, c] - qin = np.ma.zeros(self.cbc_shape, self.float_type) - qout = np.ma.zeros(self.cbc_shape, self.float_type) - qin[data > 0] = data[data > 0] - qout[data < 0] = data[data < 0] - elif imeth == 4: - # 1-LAYER ARRAY THAT DEFINES LAYER 1 - qin = np.ma.zeros(self.cbc_shape, self.float_type) - qout = np.ma.zeros(self.cbc_shape, self.float_type) - r, c = np.where(data > 0) - qin[0, r, c] = data[r, c] - r, c = np.where(data < 0) - qout[0, r, c] = data[r, c] - else: - # Should not happen - raise Exception( - f'Unrecognized "imeth" for {recname} record: {imeth}' - ) - - # Inflows - fz = [] - tz = [] - f = [] - for z in self.allzones: - if z != 0: - flux = qin[(self.izone == z)].sum() - if type(flux) == np.ma.core.MaskedConstant: - flux = 0.0 - fz.append("FROM_" + "_".join(recname.split())) - tz.append(self._zonenamedict[z]) - f.append(flux) - fz = np.array(fz) - tz = np.array(tz) - f = np.array(f) - self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim) - - # Outflows - fz = [] - tz = [] - f = [] - for z in self.allzones: - if z != 0: - flux = qout[(self.izone == z)].sum() - if type(flux) == np.ma.core.MaskedConstant: - flux = 0.0 - fz.append("TO_" + "_".join(recname.split())) - tz.append(self._zonenamedict[z]) - f.append(flux) - fz = np.array(fz) - tz = np.array(tz) - f = np.array(f) - self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim) - - def _compute_mass_balance(self, kstpkper, totim): - # Returns a record array with total inflow, total outflow, - # and percent error summed by column. 
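A worked example (values invented, not from the patch) of the percent-discrepancy formula applied in the mass-balance routine below, which expresses the in/out imbalance relative to the mean of total inflow and outflow:

intot, outot = 104.0, 100.0   # invented zone totals

pct = 100.0 * (intot - outot) / ((intot + outot) / 2.0)
print(f"{pct:.2f} %")         # ~3.92 % discrepancy relative to the mean flux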
- skipcols = ["time_step", "stress_period", "totim", "name"] - - # Compute inflows - recnames = self.get_record_names() - innames = [n for n in recnames if n.startswith("FROM_")] - outnames = [n for n in recnames if n.startswith("TO_")] - if kstpkper is not None: - rowidx = np.where( - (self._budget["time_step"] == kstpkper[0]) - & (self._budget["stress_period"] == kstpkper[1]) - & np.in1d(self._budget["name"], innames) - ) - elif totim is not None: - rowidx = np.where( - (self._budget["totim"] == totim) - & np.in1d(self._budget["name"], innames) - ) - a = _numpyvoid2numeric( - self._budget[list(self._zonenamedict.values())][rowidx] - ) - intot = np.array(a.sum(axis=0)) - tz = np.array( - list([n for n in self._budget.dtype.names if n not in skipcols]) - ) - fz = np.array(["TOTAL_IN"] * len(tz)) - self._update_budget_fromssst(fz, tz, intot, kstpkper, totim) - - # Compute outflows - if kstpkper is not None: - rowidx = np.where( - (self._budget["time_step"] == kstpkper[0]) - & (self._budget["stress_period"] == kstpkper[1]) - & np.in1d(self._budget["name"], outnames) - ) - elif totim is not None: - rowidx = np.where( - (self._budget["totim"] == totim) - & np.in1d(self._budget["name"], outnames) - ) - a = _numpyvoid2numeric( - self._budget[list(self._zonenamedict.values())][rowidx] - ) - outot = np.array(a.sum(axis=0)) - tz = np.array( - list([n for n in self._budget.dtype.names if n not in skipcols]) - ) - fz = np.array(["TOTAL_OUT"] * len(tz)) - self._update_budget_fromssst(fz, tz, outot, kstpkper, totim) - - # Compute IN-OUT - tz = np.array( - list([n for n in self._budget.dtype.names if n not in skipcols]) - ) - f = intot - outot - fz = np.array(["IN-OUT"] * len(tz)) - self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim) - - # Compute percent discrepancy - tz = np.array( - list([n for n in self._budget.dtype.names if n not in skipcols]) - ) - fz = np.array(["PERCENT_DISCREPANCY"] * len(tz)) - in_minus_out = intot - outot - in_plus_out = intot + outot - f = 100 * in_minus_out / (in_plus_out / 2.0) - self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim) - - def get_model_shape(self): - """Get model shape - - Returns - ------- - nlay : int - Number of layers - nrow : int - Number of rows - ncol : int - Number of columns - - """ - return self.nlay, self.nrow, self.ncol - - def get_record_names(self, stripped=False): - """ - Get a list of water budget record names in the file. - - Returns - ------- - out : list of strings - List of unique text names in the binary file. - - Examples - -------- - - >>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0)) - >>> recnames = zb.get_record_names() - - """ - return _get_record_names(self._budget, stripped=stripped) - - def get_budget(self, names=None, zones=None, net=False, pivot=False): - """ - Get a list of zonebudget record arrays. - - Parameters - ---------- - - names : list of strings - A list of strings containing the names of the records desired. - zones : list of ints or strings - A list of integer zone numbers or zone names desired. - net : boolean - If True, returns net IN-OUT for each record. - pivot : boolean - If True, returns data in a more user friendly format - - Returns - ------- - budget_list : list of record arrays - A list of the zonebudget record arrays. 
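The kstpkper row selection used above can be exercised on a tiny hand-built recarray; this sketch (field values invented) filters the budget to one time step/stress period and to names that start with "FROM_":

import numpy as np

budget = np.array(
    [(0, 0, "FROM_RECHARGE", 7.0), (0, 0, "TO_WELLS", 3.0),
     (1, 0, "FROM_RECHARGE", 6.0)],
    dtype=[("time_step", int), ("stress_period", int),
           ("name", "U20"), ("ZONE_1", float)],
)

innames = [n for n in np.unique(budget["name"]) if n.startswith("FROM_")]
rowidx = np.where(
    (budget["time_step"] == 0)
    & (budget["stress_period"] == 0)
    & np.in1d(budget["name"], innames)
)
print(budget[rowidx])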
- - Examples - -------- - - >>> names = ['FROM_CONSTANT_HEAD', 'RIVER_LEAKAGE_OUT'] - >>> zones = ['ZONE_1', 'ZONE_2'] - >>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0)) - >>> bud = zb.get_budget(names=names, zones=zones) - - """ - recarray = _get_budget( - self._budget, self._zonenamedict, names=names, zones=zones, net=net - ) - - if pivot: - recarray = _pivot_recarray(recarray) - - return recarray - - def get_volumetric_budget( - self, modeltime, recarray=None, extrapolate_kper=False - ): - """ - Method to generate a volumetric budget table based on flux information - - Parameters - ---------- - modeltime : flopy.discretization.ModelTime object - ModelTime object for calculating volumes - recarray : np.recarray - optional, user can pass in a numpy recarray to calculate volumetric - budget. recarray must be pivoted before passing to - get_volumetric_budget - extrapolate_kper : bool - flag to determine if we fill in data gaps with other - timestep information from the same stress period. - if True, we assume that flux is constant throughout a stress period - and the pandas dataframe returned contains a - volumetric budget per stress period - - if False, calculates volumes from available flux data - - Returns - ------- - pd.DataFrame - - """ - if recarray is None: - recarray = self.get_budget(pivot=True) - return _volumetric_flux(recarray, modeltime, extrapolate_kper) - - def to_csv(self, fname): - """ - Saves the budget record arrays to a formatted - comma-separated values file. - - Parameters - ---------- - fname : str - The name of the output comma-separated values file. - - Returns - ------- - None - - """ - # Needs updating to handle the new budget list structure. Write out - # budgets for all kstpkper if kstpkper is None or pass list of - # kstpkper/totim to save particular budgets. - with open(fname, "w") as f: - # Write header - f.write(",".join(self._budget.dtype.names) + "\n") - # Write rows - for rowidx in range(self._budget.shape[0]): - s = ( - ",".join([str(i) for i in list(self._budget[:][rowidx])]) - + "\n" - ) - f.write(s) - return - - def get_dataframes( - self, - start_datetime=None, - timeunit="D", - index_key="totim", - names=None, - zones=None, - net=False, - pivot=False, - ): - """ - Get pandas dataframes. - - Parameters - ---------- - - start_datetime : str - Datetime string indicating the time at which the simulation starts. - timeunit : str - String that indicates the time units used in the model. - index_key : str - Indicates the fields to be used (in addition to "record") in the - resulting DataFrame multi-index. - names : list of strings - A list of strings containing the names of the records desired. - zones : list of ints or strings - A list of integer zone numbers or zone names desired. - net : boolean - If True, returns net IN-OUT for each record. - pivot : bool - If True, returns dataframe in a more user friendly format - - Returns - ------- - df : Pandas DataFrame - Pandas DataFrame with the budget information. 
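A minimal sketch (not patch content; records and file name invented) of the CSV-writing pattern used by to_csv() above: a header of field names, then one comma-joined row per record of the structured budget array:

import numpy as np

budget = np.array(
    [(1.0, 0, 0, "FROM_RECHARGE", 10.0), (1.0, 0, 0, "TO_WELLS", 4.0)],
    dtype=[("totim", float), ("time_step", int), ("stress_period", int),
           ("name", "U25"), ("ZONE_1", float)],
)

with open("budget_demo.csv", "w") as f:
    f.write(",".join(budget.dtype.names) + "\n")
    for row in budget:
        f.write(",".join(str(v) for v in row.tolist()) + "\n")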
- - Examples - -------- - >>> from flopy.utils.zonbud import ZoneBudget - >>> zon = ZoneBudget.read_zone_file('zone_input_file') - >>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0)) - >>> df = zb.get_dataframes() - - """ - recarray = self.get_budget(names, zones, net, pivot=pivot) - return _recarray_to_dataframe( - recarray, - self._zonenamedict, - start_datetime=start_datetime, - timeunit=timeunit, - index_key=index_key, - zones=zones, - pivot=pivot, - ) - - @classmethod - def _get_otype(cls, fname): - """ - Method to automatically distinguish output type based on the - zonebudget header - - Parameters - ---------- - fname : str - zonebudget output file name - - Returns - ------- - otype : int - - """ - with open(fname) as foo: - line = foo.readline() - if "zonebudget version" in line.lower(): - otype = 0 - elif "time step" in line.lower(): - otype = 1 - elif "totim" in line.lower(): - otype = 2 - else: - raise AssertionError("Cant distinguish output type") - return otype - - @classmethod - def read_output(cls, fname, net=False, dataframe=False, **kwargs): - """ - Method to read a zonebudget output file into a recarray or pandas - dataframe - - Parameters - ---------- - fname : str - zonebudget output file name - net : bool - boolean flag for net budget - dataframe : bool - boolean flag to return a pandas dataframe - - **kwargs - pivot : bool - - start_datetime : str - Datetime string indicating the time at which the simulation - starts. Can be used when pandas dataframe is requested - timeunit : str - String that indicates the time units used in the model. - - - Returns - ------- - np.recarray - """ - otype = ZoneBudget._get_otype(fname) - if otype == 0: - recarray = _read_zb_zblst(fname) - elif otype == 1: - recarray = _read_zb_csv(fname) - else: - add_prefix = kwargs.pop("add_prefix", True) - recarray = _read_zb_csv2(fname, add_prefix=add_prefix) - - zonenamdict = { - int(i.split("_")[-1]): i - for i in recarray.dtype.names - if i.startswith("ZONE") - } - pivot = kwargs.pop("pivot", False) - recarray = _get_budget(recarray, zonenamdict, net=net) - if pivot: - recarray = _pivot_recarray(recarray) - - if not dataframe: - return recarray - else: - start_datetime = kwargs.pop("start_datetime", None) - timeunit = kwargs.pop("timeunit", "D") - return _recarray_to_dataframe( - recarray, - zonenamdict, - start_datetime=start_datetime, - timeunit=timeunit, - pivot=pivot, - ) - - @classmethod - def read_zone_file(cls, fname): - """Method to read a zonebudget zone file into memory - - Parameters - ---------- - fname : str - zone file name - - Returns - ------- - zones : np.array - - """ - with open(fname, "r") as f: - lines = f.readlines() - - # Initialize layer - lay = 0 - - # Initialize data counter - totlen = 0 - i = 0 - - # First line contains array dimensions - dimstring = lines.pop(0).strip().split() - nlay, nrow, ncol = [int(v) for v in dimstring] - zones = np.zeros((nlay, nrow, ncol), dtype=np.int32) - - # The number of values to read before placing - # them into the zone array - datalen = nrow * ncol - - # List of valid values for LOCAT - locats = ["CONSTANT", "INTERNAL", "EXTERNAL"] - - # ITERATE OVER THE ROWS - for line in lines: - rowitems = line.strip().split() - - # Skip blank lines - if len(rowitems) == 0: - continue - - # HEADER - if rowitems[0].upper() in locats: - vals = [] - locat = rowitems[0].upper() - - if locat == "CONSTANT": - iconst = int(rowitems[1]) - else: - fmt = rowitems[1].strip("()") - fmtin, iprn = [int(v) for v in fmt.split("I")] - - # ZONE DATA - else: 
- if locat == "CONSTANT": - vals = np.ones((nrow, ncol), dtype=int) * iconst - lay += 1 - elif locat == "INTERNAL": - # READ ZONES - rowvals = [int(v) for v in rowitems] - s = "Too many values encountered on this line." - assert len(rowvals) <= fmtin, s - vals.extend(rowvals) - - elif locat == "EXTERNAL": - # READ EXTERNAL FILE - fname = rowitems[0] - if not os.path.isfile(fname): - errmsg = f'Could not find external file "{fname}"' - raise Exception(errmsg) - with open(fname, "r") as ext_f: - ext_flines = ext_f.readlines() - for ext_frow in ext_flines: - ext_frowitems = ext_frow.strip().split() - rowvals = [int(v) for v in ext_frowitems] - vals.extend(rowvals) - if len(vals) != datalen: - errmsg = ( - "The number of values read from external " - 'file "{}" does not match the expected ' - "number.".format(len(vals)) - ) - raise Exception(errmsg) - else: - # Should not get here - raise Exception(f"Locat not recognized: {locat}") - - # IGNORE COMPOSITE ZONES - - if len(vals) == datalen: - # place values for the previous layer into the zone array - vals = np.array(vals, dtype=int).reshape((nrow, ncol)) - zones[lay, :, :] = vals[:, :] - lay += 1 - totlen += len(rowitems) - i += 1 - s = ( - "The number of values read ({:,.0f})" - " does not match the number expected" - " ({:,.0f})".format(totlen, nlay * nrow * ncol) - ) - assert totlen == nlay * nrow * ncol, s - return zones - - @classmethod - def write_zone_file(cls, fname, array, fmtin=None, iprn=None): - """ - Saves a numpy array in a format readable by the zonebudget program - executable. - - File format: - line 1: nlay, nrow, ncol - line 2: INTERNAL (format) - line 3: begin data - . - . - . - - example from NACP: - 19 250 500 - INTERNAL (10I7) - 199 199 199 199 199 199 199 199 199 - 199 199 199 199 199 199 199 199 199 - ... - INTERNAL (10I7) - 199 199 199 199 199 199 199 199 199 - 199 199 199 199 199 199 199 199 199 - ... - - Parameters - ---------- - array : array - The array of zones to be written. - fname : str - The path and name of the file to be written. - fmtin : int - The number of values to write to each line. - iprn : int - Padding space to add between each value. - - Returns - ------- - - """ - if len(array.shape) == 2: - b = np.zeros((1, array.shape[0], array.shape[1]), dtype=np.int32) - b[0, :, :] = array[:, :] - array = b.copy() - elif len(array.shape) < 2 or len(array.shape) > 3: - raise Exception( - f"Shape of the input array is not recognized: {array.shape}" - ) - if np.ma.is_masked(array): - array = np.ma.filled(array, 0) - - nlay, nrow, ncol = array.shape - - if fmtin is not None: - assert fmtin <= ncol, ( - "The specified width is greater than the " - "number of columns in the array." 
- ) - else: - fmtin = ncol - - iprnmin = len(str(array.max())) - if iprn is None or iprn <= iprnmin: - iprn = iprnmin + 1 - - formatter_str = f"{{:>{iprn}}}" - formatter = formatter_str.format - - with open(fname, "w") as f: - header = f"{nlay} {nrow} {ncol}\n" - f.write(header) - for lay in range(nlay): - record_2 = f"INTERNAL\t({fmtin}I{iprn})\n" - f.write(record_2) - if fmtin < ncol: - for row in range(nrow): - rowvals = array[lay, row, :].ravel() - start = 0 - end = start + fmtin - vals = rowvals[start:end] - while len(vals) > 0: - s = ( - "".join([formatter(int(val)) for val in vals]) - + "\n" - ) - f.write(s) - start = end - end = start + fmtin - vals = rowvals[start:end] - - elif fmtin == ncol: - for row in range(nrow): - vals = array[lay, row, :].ravel() - f.write( - "".join([formatter(int(val)) for val in vals]) - + "\n" - ) - - def copy(self): - """ - Return a deepcopy of the object. - """ - return copy.deepcopy(self) - - def export(self, f, ml, **kwargs): - """ - Method to export a netcdf file, or add zonebudget output to - an open netcdf file instance - - Parameters - ---------- - f : str or flopy.export.netcdf.NetCdf object - ml : flopy.modflow.Modflow or flopy.mf6.ModflowGwf object - **kwargs : - logger : flopy.export.netcdf.Logger instance - masked_vals : list - list of values to mask - - Returns - ------- - flopy.export.netcdf.NetCdf object - - """ - from flopy.export.utils import output_helper - - if isinstance(f, str): - if not f.endswith(".nc"): - raise AssertionError( - "File extension must end with .nc to " - "export a netcdf file" - ) - - zbncfobj = dataframe_to_netcdf_fmt( - self.get_dataframes(pivot=True), self.izone, flux=True - ) - oudic = {"zbud": zbncfobj} - return output_helper(f, ml, oudic, **kwargs) - - def __deepcopy__(self, memo): - """ - Over-rides the default deepcopy behavior. Copy all attributes except - the CellBudgetFile object which does not copy nicely. - """ - cls = self.__class__ - result = cls.__new__(cls) - memo[id(self)] = result - ignore_attrs = ["cbc"] - for k, v in self.__dict__.items(): - if k not in ignore_attrs: - setattr(result, k, copy.deepcopy(v, memo)) - - # Set CellBudgetFile object attribute manually. This is object - # read-only so should not be problems with pointers from - # multiple objects. 
- result.cbc = self.cbc - return result - - def __mul__(self, other): - newbud = self._budget.copy() - for f in self._zonenamedict.values(): - newbud[f] = np.array([r for r in newbud[f]]) * other - idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY") - newbud[:][idx] = self._budget[:][idx] - newobj = self.copy() - newobj._budget = newbud - return newobj - - def __truediv__(self, other): - newbud = self._budget.copy() - for f in self._zonenamedict.values(): - newbud[f] = np.array([r for r in newbud[f]]) / float(other) - idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY") - newbud[:][idx] = self._budget[:][idx] - newobj = self.copy() - newobj._budget = newbud - return newobj - - def __div__(self, other): - newbud = self._budget.copy() - for f in self._zonenamedict.values(): - newbud[f] = np.array([r for r in newbud[f]]) / float(other) - idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY") - newbud[:][idx] = self._budget[:][idx] - newobj = self.copy() - newobj._budget = newbud - return newobj - - def __add__(self, other): - newbud = self._budget.copy() - for f in self._zonenamedict.values(): - newbud[f] = np.array([r for r in newbud[f]]) + other - idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY") - newbud[:][idx] = self._budget[:][idx] - newobj = self.copy() - newobj._budget = newbud - return newobj - - def __sub__(self, other): - newbud = self._budget.copy() - for f in self._zonenamedict.values(): - newbud[f] = np.array([r for r in newbud[f]]) - other - idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY") - newbud[:][idx] = self._budget[:][idx] - newobj = self.copy() - newobj._budget = newbud - return newobj - - -class ZoneBudget6: - """ - Model class for building, editing and running MODFLOW 6 zonebuget - - Parameters - ---------- - name : str - model name for zonebudget - model_ws : str - path to model - exe_name : str - excutable name - extension : str - name file extension - """ - - def __init__( - self, - name="zonebud", - model_ws=".", - exe_name="zbud6", - extension=".zbnam", - ): - from ..mf6.utils import MfGrdFile - from .binaryfile import CellBudgetFile - - self._name = name - self._zon = None - self._grb = None - self._bud = None - self._model_ws = model_ws - self._exe_name = exe_name - - if not extension.startswith("."): - extension = "." 
+ extension - - self._extension = extension - self.zbnam_packages = { - "zon": ZoneFile6, - "bud": CellBudgetFile, - "grb": MfGrdFile, - } - self.package_dict = {} - if self._zon is not None: - self.package_dict["zon"] = self._zon - if self._grb is not None: - self.package_dict["grb"] = self._grb - if self._bud is not None: - self.package_dict["bud"] = self._bud - - self._recarray = None - - def run_model(self, exe_name=None, nam_file=None, silent=False): - """ - Method to run a zonebudget model - - Parameters - ---------- - exe_name : str - optional zonebudget executable name - nam_file : str - optional zonebudget name file name - silent : bool - optional flag to silence output - - Returns - ------- - tuple - """ - from ..mbase import run_model - - if exe_name is None: - exe_name = self._exe_name - if nam_file is None: - nam_file = os.path.join(self._name + self._extension) - return run_model( - exe_name, nam_file, model_ws=self._model_ws, silent=silent - ) - - def __setattr__(self, key, value): - if key in ("zon", "bud", "grb", "cbc"): - self.add_package(key, value) - return - elif key == "model_ws": - raise AttributeError("please use change_model_ws() method") - elif key == "name": - self.change_model_name(value) - super().__setattr__(key, value) - - def __getattr__(self, item): - if item in ("zon", "bud", "grb", "name", "model_ws"): - item = f"_{item}" - return super().__getattribute__(item) - - def add_package(self, pkg_name, pkg): - """ - Method to add a package to the ZoneBudget6 object - - Parameters - ---------- - pkg_name : str - three letter package abbreviation - pkg : str or object - either a package file name or package object - - """ - pkg_name = pkg_name.lower() - if pkg_name not in self.zbnam_packages: - if pkg_name == "cbc": - pkg_name = "bud" - else: - raise KeyError( - f"{pkg_name} package is not valid for zonebudget" - ) - - if isinstance(pkg, str): - if os.path.exists(os.path.join(self._model_ws, pkg)): - pkg = os.path.join(self._model_ws, pkg) - - func = self.zbnam_packages[pkg_name] - if pkg_name in ("bud", "grb"): - pkg = func(pkg, precision="double") - else: - pkg = func.load(pkg, self) - - else: - pass - - pkg_name = f"_{pkg_name}" - self.__setattr__(pkg_name, pkg) - if pkg is not None: - self.package_dict[pkg_name[1:]] = pkg - - def change_model_ws(self, model_ws): - """ - Method to change the model ws for writing a zonebudget - model. - - Parameters - ---------- - model_ws : str - new model directory - - """ - self._model_ws = model_ws - - def change_model_name(self, name): - """ - Method to change the model name for writing a zonebudget - model. - - Parameters - ---------- - name : str - new model name - - """ - self._name = name - if self._zon is not None: - self._zon.filename = f"{name}.{self._zon.filename.split('.')[-1]}" - - def get_dataframes( - self, - start_datetime=None, - timeunit="D", - index_key="totim", - names=None, - zones=None, - net=False, - pivot=False, - ): - """ - Get pandas dataframes. - - Parameters - ---------- - - start_datetime : str - Datetime string indicating the time at which the simulation starts. - timeunit : str - String that indicates the time units used in the model. - index_key : str - Indicates the fields to be used (in addition to "record") in the - resulting DataFrame multi-index. - names : list of strings - A list of strings containing the names of the records desired. - zones : list of ints or strings - A list of integer zone numbers or zone names desired. - net : boolean - If True, returns net IN-OUT for each record. 
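A hypothetical end-to-end workflow for the ZoneBudget6 class above (a sketch, not from the patch): the grid and budget file names, and a zbud6 executable on the PATH, are all assumptions; substitute the outputs of an actual MODFLOW 6 simulation.

import numpy as np
from flopy.utils.zonbud import ZoneBudget6, ZoneFile6

zb6 = ZoneBudget6(name="zonebud", model_ws=".", exe_name="zbud6")
zb6.grb = "mymodel.dis.grb"   # assumed binary grid file -> MfGrdFile package
zb6.bud = "mymodel.cbc"       # assumed cell budget file -> CellBudgetFile
ZoneFile6(zb6, izone=np.ones((100,), dtype=int))   # one zone, 100 cells

zb6.write_input()
success, buff = zb6.run_model()        # requires the zbud6 executable
df = zb6.get_dataframes(pivot=True)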
- pivot : bool - If True, returns data in a more user friendly fashion - - Returns - ------- - df : Pandas DataFrame - Pandas DataFrame with the budget information. - - Examples - -------- - >>> from flopy.utils.zonbud import ZoneBudget6 - >>> zb6 = ZoneBudget6.load("my_nam_file", model_ws="my_model_ws") - >>> zb6.run_model() - >>> df = zb6.get_dataframes() - - """ - recarray = self.get_budget( - names=names, zones=zones, net=net, pivot=pivot - ) - - return _recarray_to_dataframe( - recarray, - self._zon._zonenamedict, - start_datetime=start_datetime, - timeunit=timeunit, - index_key=index_key, - zones=zones, - pivot=pivot, - ) - - def get_budget( - self, f=None, names=None, zones=None, net=False, pivot=False - ): - """ - Method to read and get zonebudget output - - Parameters - ---------- - f : str - zonebudget output file name - names : list of strings - A list of strings containing the names of the records desired. - zones : list of ints or strings - A list of integer zone numbers or zone names desired. - net : boolean - If True, returns net IN-OUT for each record. - pivot : bool - Method to pivot recordarray into a more user friendly method - for working with data - - Returns - ------- - np.recarray - """ - aliases = None - if self._zon is not None: - aliases = self._zon.aliases - - if f is None and self._recarray is None: - f = os.path.join(self._model_ws, f"{self._name}.csv") - self._recarray = _read_zb_csv2( - f, add_prefix=False, aliases=aliases - ) - elif f is None: - pass - else: - self._recarray = _read_zb_csv2( - f, add_prefix=False, aliases=aliases - ) - - recarray = _get_budget( - self._recarray, - self._zon._zonenamedict, - names=names, - zones=zones, - net=net, - ) - - if pivot: - recarray = _pivot_recarray(recarray) - - return recarray - - def get_volumetric_budget( - self, modeltime, recarray=None, extrapolate_kper=False - ): - """ - Method to generate a volumetric budget table based on flux information - - Parameters - ---------- - modeltime : flopy.discretization.ModelTime object - ModelTime object for calculating volumes - recarray : np.recarray - optional, user can pass in a numpy recarray to calculate volumetric - budget. recarray must be pivoted before passing to - get_volumetric_budget - extrapolate_kper : bool - flag to determine if we fill in data gaps with other - timestep information from the same stress period. 
- if True, we assume that flux is constant throughout a stress period - and the pandas dataframe returned contains a - volumetric budget per stress period - - if False, calculates volumes from available flux data - - Returns - ------- - pd.DataFrame - - """ - if recarray is None: - recarray = self.get_budget(pivot=True) - return _volumetric_flux(recarray, modeltime, extrapolate_kper) - - def write_input(self, line_length=20): - """ - Method to write a ZoneBudget 6 model to file - - Parameters - ---------- - line_length : int - length of line for izone array - - """ - nam = [] - for pkg_nam, pkg in self.package_dict.items(): - if pkg_nam in ("grb", "bud"): - path = os.path.relpath(pkg.filename, self._model_ws) - else: - path = pkg.filename - pkg.write_input(line_length=line_length) - nam.append(f" {pkg_nam.upper()} {path}\n") - - path = os.path.join(self._model_ws, self._name + self._extension) - with open(path, "w") as foo: - foo.write("BEGIN ZONEBUDGET\n") - foo.writelines(nam) - foo.write("END ZONEBUDGET\n") - - @staticmethod - def load(nam_file, model_ws="."): - """ - Method to load a zonebudget model from namefile - - Parameters - ---------- - nam_file : str - zonebudget name file - model_ws : str - model workspace path - - Returns - ------- - ZoneBudget6 object - """ - from ..utils.flopy_io import multi_line_strip - - name = nam_file.split(".")[0] - zb6 = ZoneBudget6(name=name, model_ws=model_ws) - with open(os.path.join(model_ws, nam_file)) as foo: - line = multi_line_strip(foo) - if "begin" in line: - while True: - t = multi_line_strip(foo).split() - if t[0] == "end": - break - else: - zb6.add_package(t[0], t[1]) - - return zb6 - - def export(self, f, ml, **kwargs): - """ - Method to export a netcdf file, or add zonebudget output to - an open netcdf file instance - - Parameters - ---------- - f : str or flopy.export.netcdf.NetCdf object - ml : flopy.modflow.Modflow or flopy.mf6.ModflowGwf object - **kwargs : - logger : flopy.export.netcdf.Logger instance - masked_vals : list - list of values to mask - - Returns - ------- - flopy.export.netcdf.NetCdf object - - """ - from flopy.export.utils import output_helper - - if isinstance(f, str): - if not f.endswith(".nc"): - raise AssertionError( - "File extension must end with .nc to " - "export a netcdf file" - ) - - zbncfobj = dataframe_to_netcdf_fmt( - self.get_dataframes(pivot=True), self._zon.izone, flux=True - ) - oudic = {"zbud": zbncfobj} - return output_helper(f, ml, oudic, **kwargs) - - -class ZoneFile6: - """ - Class to build, read, write and edit MODFLOW 6 zonebudget zone files - - Parameters - ---------- - model : ZoneBudget6 object - model object - izone : np.array - numpy array of zone numbers - extension : str - zone file extension name, defaults to ".zon" - aliases : dict - optional dictionary of zone aliases. ex. {1 : "nw_model"} - """ - - def __init__(self, model, izone, extension=".zon", aliases=None): - self.izone = izone - - if not extension.startswith("."): - extension = "." 
+ extension - - self._extension = extension - self._parent = model - self._parent.add_package("zon", self) - self.filename = self._parent.name + extension - self.aliases = aliases - self.allzones = [int(z) for z in np.unique(izone) if z != 0] - self._zonenamedict = {z: f"ZONE_{z}" for z in self.allzones} - - if aliases is not None: - if not isinstance(aliases, dict): - raise TypeError("aliases parameter must be a dictionary") - - pop_list = [] - for zn, alias in aliases.items(): - if zn in self._zonenamedict: - self._zonenamedict[zn] = "_".join(alias.split()) - self.aliases[zn] = "_".join(alias.split()) - else: - pop_list.append(zn) - print(f"warning: zone number {zn} not found") - - for p in pop_list: - aliases.pop(p) - - @property - def ncells(self): - """ - Method to get number of model cells - - """ - return self.izone.size - - def write_input(self, f=None, line_length=20): - """ - Method to write the zonebudget 6 file - - Parameters - ---------- - f : str - zone file name - line_length : int - maximum length of line to write in izone array - """ - if f is None: - f = os.path.join(self._parent.model_ws, self.filename) - - with open(f, "w") as foo: - bfmt = [" {:d}"] - foo.write( - f"BEGIN DIMENSIONS\n NCELLS {self.ncells}\n" - "END DIMENSIONS\n\n" - ) - - foo.write("BEGIN GRIDDATA\n IZONE\n") - foo.write(" INTERNAL FACTOR 1 IPRN 0\n") - izone = np.ravel(self.izone) - i0 = 0 - i1 = line_length - while i1 < self.izone.size: - fmt = "".join(bfmt * line_length) - foo.write(fmt.format(*izone[i0:i1])) - foo.write("\n") - i0 = i1 - i1 += line_length - i1 = self.izone.size - i0 - fmt = "".join(bfmt * i1) - foo.write(fmt.format(*izone[i0:])) - foo.write("\nEND GRIDDATA\n") - - @staticmethod - def load(f, model): - """ - Method to load a Zone file for zonebudget 6. 
- - Parameter - --------- - f : str - zone file name - model : ZoneBudget6 object - zonebudget 6 model object - - Returns - ------- - ZoneFile6 object - - """ - from ..utils.flopy_io import multi_line_strip - - pkg_ws = os.path.split(f)[0] - with open(f) as foo: - t = [0] - while t[0] != "ncells": - t = multi_line_strip(foo).split() - - ncells = int(t[1]) - - t = [0] - while t[0] != "izone": - t = multi_line_strip(foo).split() - - method = multi_line_strip(foo).split()[0] - - if method in ("internal", "open/close"): - izone = np.zeros((ncells,), dtype=int) - i = 0 - fobj = foo - if method == "open/close": - fobj = open(os.path.join(pkg_ws, t[1])) - while i < ncells: - t = multi_line_strip(fobj) - if t[0] == "open/close": - if fobj != foo: - fobj.close() - fobj = open(os.path.join(pkg_ws, t[1])) - for zn in t: - izone[i] = zn - i += 1 - else: - izone = np.array([t[1]] * ncells, dtype=int) - - zon = ZoneFile6(model, izone) - return zon - - -def _numpyvoid2numeric(a): - # The budget record array has multiple dtypes and a slice returns - # the flexible-type numpy.void which must be converted to a numeric - # type prior to performing reducing functions such as sum() or - # mean() - return np.array([list(r) for r in a]) - - -def sum_flux_tuples(fromzones, tozones, fluxes): - tup = zip(fromzones, tozones, fluxes) - sorted_tups = sort_tuple(tup) - - # Group the sorted tuples by (from zone, to zone) - # itertools.groupby() returns the index (from zone, to zone) and - # a list of the tuples with that index - from_zones = [] - to_zones = [] - fluxes = [] - for (fz, tz), ftup in groupby(sorted_tups, lambda tup: tup[:2]): - f = np.sum([tup[-1] for tup in list(ftup)]) - from_zones.append(fz) - to_zones.append(tz) - fluxes.append(f) - return np.array(from_zones), np.array(to_zones), np.array(fluxes) - - -def sort_tuple(tup, n=2): - """Sort a tuple by the first n values - - tup: tuple - input tuple - n : int - values to sort tuple by (default is 2) - - Returns - ------- - tup : tuple - tuple sorted by the first n values - - """ - return tuple(sorted(tup, key=lambda t: t[:n])) - - -def _recarray_to_dataframe( - recarray, - zonenamedict, - start_datetime=None, - timeunit="D", - index_key="totim", - zones=None, - pivot=False, -): - """ - Method to convert zonebudget recarrays to pandas dataframes - - Parameters - ---------- - recarray : - zonenamedict : - start_datetime : - timeunit : - index_key : - names : - zones : - net : - - Returns - ------- - - pd.DataFrame - """ - pd = import_optional_dependency( - "pandas", - error_message="ZoneBudget.get_dataframes() requires pandas.", - ) - - valid_index_keys = ["totim", "kstpkper"] - s = f'index_key "{index_key}" is not valid.' - assert index_key in valid_index_keys, s - - valid_timeunit = ["S", "M", "H", "D", "Y"] - - if timeunit.upper() == "SECONDS": - timeunit = "S" - elif timeunit.upper() == "MINUTES": - timeunit = "M" - elif timeunit.upper() == "HOURS": - timeunit = "H" - elif timeunit.upper() == "DAYS": - timeunit = "D" - elif timeunit.upper() == "YEARS": - timeunit = "Y" - - errmsg = ( - f"Specified time units ({timeunit}) not recognized. Please use one of " - ) - assert timeunit in valid_timeunit, errmsg + ", ".join(valid_timeunit) + "." 
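As a stand-alone run with invented data (not patch content), sum_flux_tuples() above reduces (from zone, to zone, flux) triples by sorting them, grouping on the zone pair with itertools.groupby, and summing the fluxes within each group:

from itertools import groupby
import numpy as np

fromzones = np.array([1, 1, 2, 1])
tozones = np.array([2, 2, 3, 3])
fluxes = np.array([4.0, 1.0, 2.5, 0.5])

tups = sorted(zip(fromzones, tozones, fluxes), key=lambda t: t[:2])
for (fz, tz), grp in groupby(tups, lambda t: t[:2]):
    print(fz, "->", tz, sum(t[-1] for t in grp))
# 1 -> 2 5.0
# 1 -> 3 0.5
# 2 -> 3 2.5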
- - df = pd.DataFrame().from_records(recarray) - if start_datetime is not None and "totim" in list(df): - totim = totim_to_datetime( - df.totim, - start=pd.to_datetime(start_datetime), - timeunit=timeunit, - ) - df["datetime"] = totim - if pivot: - return pd.DataFrame.from_records(recarray) - - index_cols = ["datetime", "name"] - else: - if pivot: - return pd.DataFrame.from_records(recarray) - - if index_key == "totim" and "totim" in list(df): - index_cols = ["totim", "name"] - else: - index_cols = ["time_step", "stress_period", "name"] - - df = df.set_index(index_cols) # .sort_index(level=0) - if zones is not None: - keep_cols = zones - else: - keep_cols = zonenamedict.values() - return df.loc[:, keep_cols] - - -def _get_budget(recarray, zonenamedict, names=None, zones=None, net=False): - """ - Get a list of zonebudget record arrays. - - Parameters - ---------- - recarray : np.recarray - budget recarray - zonenamedict : dict - dictionary of zone names - names : list of strings - A list of strings containing the names of the records desired. - zones : list of ints or strings - A list of integer zone numbers or zone names desired. - net : boolean - If True, returns net IN-OUT for each record. - - Returns - ------- - budget_list : list of record arrays - A list of the zonebudget record arrays. - - """ - if isinstance(names, str): - names = [names] - if isinstance(zones, str): - zones = [zones] - elif isinstance(zones, int): - zones = [zones] - standard_fields = ["time_step", "stress_period", "name"] - if "totim" in recarray.dtype.names: - standard_fields.insert(0, "totim") - select_fields = standard_fields + list(zonenamedict.values()) - select_records = np.where((recarray["name"] == recarray["name"])) - if zones is not None: - for idx, z in enumerate(zones): - if isinstance(z, int): - zones[idx] = zonenamedict[z] - select_fields = standard_fields + zones - - if names is not None: - names = _clean_budget_names(recarray, names) - select_records = np.in1d(recarray["name"], names) - if net: - if names is None: - names = _clean_budget_names(recarray, _get_record_names(recarray)) - net_budget = _compute_net_budget(recarray, zonenamedict) - seen = [] - net_names = [] - for name in names: - if name.endswith("_IN") or name.endswith("_OUT"): - iname = "_".join(name.split("_")[:-1]) - else: - iname = "_".join(name.split("_")[1:]) - if iname not in seen: - seen.append(iname) - else: - net_names.append(iname) - select_records = np.in1d(net_budget["name"], net_names) - return net_budget[select_fields][select_records] - else: - return recarray[select_fields][select_records] - - -def _clean_budget_names(recarray, names): - """ - Method to clean budget names - - Parameters - ---------- - recarray : np.recarray - - names : list - list of names in recarray - - Returns - ------- - list - """ - newnames = [] - mbnames = ["TOTAL_IN", "TOTAL_OUT", "IN-OUT", "PERCENT_DISCREPANCY"] - for name in names: - if name in mbnames: - newnames.append(name) - elif ( - not name.startswith("FROM_") - and not name.startswith("TO_") - and not name.endswith("_IN") - and not name.endswith("_OUT") - ): - newname_in = "FROM_" + name.upper() - newname_out = "TO_" + name.upper() - if newname_in in recarray["name"]: - newnames.append(newname_in) - if newname_out in recarray["name"]: - newnames.append(newname_out) - else: - if name in recarray["name"]: - newnames.append(name) - return newnames - - -def _get_record_names(recarray, stripped=False): - """ - Get a list of water budget record names in the file. 
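A simplified sketch (my example; the _IN/_OUT suffix cases of the real routine are omitted) of the name cleaning above: bare record names are expanded to their FROM_/TO_ forms when those exist in the budget recarray, while mass-balance names pass through unchanged.

import numpy as np

names_in_budget = np.array(
    ["FROM_RECHARGE", "TO_RECHARGE", "TO_WELLS", "TOTAL_IN"]
)

def clean(names):
    out = []
    for name in names:
        if name in ("TOTAL_IN", "TOTAL_OUT", "IN-OUT", "PERCENT_DISCREPANCY"):
            out.append(name)
        elif not name.startswith(("FROM_", "TO_")):
            for candidate in (f"FROM_{name.upper()}", f"TO_{name.upper()}"):
                if candidate in names_in_budget:
                    out.append(candidate)
        elif name in names_in_budget:
            out.append(name)
    return out

print(clean(["RECHARGE", "TO_WELLS", "TOTAL_IN"]))
# ['FROM_RECHARGE', 'TO_RECHARGE', 'TO_WELLS', 'TOTAL_IN']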
- - Returns - ------- - out : list of strings - List of unique text names in the binary file. - - """ - rec_names = np.unique(recarray["name"]) - if not stripped: - return rec_names - else: - seen = [] - for recname in rec_names: - if recname in ["IN-OUT", "TOTAL_IN", "TOTAL_OUT", "IN_OUT"]: - continue - if recname.endswith("_IN"): - recname = recname[:-3] - elif recname.endswith("_OUT"): - recname = recname[:-4] - if recname not in seen: - seen.append(recname) - seen.extend(["IN-OUT", "TOTAL", "IN_OUT"]) - return np.array(seen) - - -def _compute_net_budget(recarray, zonenamedict): - """ - - :param recarray: - :param zonenamedict: - :return: - """ - recnames = _get_record_names(recarray) - innames = [ - n for n in recnames if n.startswith("FROM_") or n.endswith("_IN") - ] - outnames = [ - n for n in recnames if n.startswith("TO_") or n.endswith("_OUT") - ] - select_fields = ["totim", "time_step", "stress_period", "name"] + list( - zonenamedict.values() - ) - if "totim" not in recarray.dtype.names: - select_fields.pop(0) - - select_records_in = np.in1d(recarray["name"], innames) - select_records_out = np.in1d(recarray["name"], outnames) - in_budget = recarray[select_fields][select_records_in] - out_budget = recarray[select_fields][select_records_out] - net_budget = in_budget.copy() - for f in [n for n in zonenamedict.values() if n in select_fields]: - net_budget[f] = np.array([r for r in in_budget[f]]) - np.array( - [r for r in out_budget[f]] - ) - newnames = [] - for n in net_budget["name"]: - if n.endswith("_IN") or n.endswith("_OUT"): - newnames.append("_".join(n.split("_")[:-1])) - else: - newnames.append("_".join(n.split("_")[1:])) - net_budget["name"] = newnames - return net_budget - - -def _read_zb_zblst(fname): - """Method to read zonebudget zblst output - - Parameters - ---------- - fname : str - zonebudget output file name - - Returns - ------- - np.recarray - """ - with open(fname) as foo: - - data = {} - read_data = False - flow_budget = False - empty = 0 - prefix = "" - while True: - line = foo.readline().strip().upper() - t = line.split() - if t: - if t[-1].strip() == "ZONES.": - line = foo.readline().strip() - zones = [int(i) for i in line.split()] - for zone in zones: - data[f"TO_ZONE_{zone}"] = [] - data[f"FROM_ZONE_{zone}"] = [] - - if "FLOW BUDGET FOR ZONE" in line: - flow_budget = True - read_data = False - zlist = [] - empty = 0 - t = line.split() - zone = int(t[4]) - if len(t[7]) > 4: - t.insert(8, t[7][4:]) - kstp = int(t[8]) - 1 - if len(t[11]) > 6: - t.append(t[11][6:]) - kper = int(t[12]) - 1 - if "ZONE" not in data: - data["ZONE"] = [zone] - data["KSTP"] = [kstp] - data["KPER"] = [kper] - else: - data["ZONE"].append(zone) - data["KSTP"].append(kstp) - data["KPER"].append(kper) - - elif line in ("", " "): - empty += 1 - - elif read_data: - if "=" in line: - t = line.split("=") - label = t[0].strip() - if "ZONE" in line: - if prefix == "FROM_": - zlist.append(int(label.split()[1])) - label = f"FROM_ZONE_{label.split()[1]}" - else: - label = f"TO_ZONE_{label.split()[-1]}" - - elif "TOTAL" in line or "PERCENT DISCREPANCY" in line: - label = "_".join(label.split()) - - elif "IN - OUT" in line: - label = "IN-OUT" - - else: - label = prefix + "_".join(label.split()) - - if label in data: - data[label].append(float(t[1])) - else: - data[label] = [float(t[1])] - - if label == "PERCENT_DISCREPANCY": - # fill in non-connected zones with zeros... 
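                        # e.g. if the header listed zones [1, 2, 3] but this
                        # budget block only reported flow to/from zone 2,
                        # zones 1 and 3 still get a 0 entry so that every
                        # column has the same length when the dict is
                        # converted by _zb_dict_to_recarray()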
- for zone in zones: - if zone in zlist: - continue - data[f"FROM_ZONE_{zone}"].append(0) - data[f"TO_ZONE_{zone}"].append(0) - - elif "OUT:" in line: - prefix = "TO_" - - else: - pass - - elif flow_budget: - if "IN:" in line: - prefix = "FROM_" - read_data = True - flow_budget = False - - else: - pass - - if empty >= 30: - break - - return _zb_dict_to_recarray(data) - - -def _read_zb_csv(fname): - """Method to read zonebudget csv output - - Parameters - ---------- - fname : str - zonebudget output file name - - Returns - ------- - np.recarray - """ - with open(fname) as foo: - data = {} - zone_header = False - read_data = False - empty = 0 - while True: - line = foo.readline().strip().upper() - - if "TIME STEP" in line: - t = line.split(",") - kstp = int(t[1]) - 1 - kper = int(t[3]) - 1 - totim = float(t[5]) - if "KSTP" not in data: - data["KSTP"] = [] - data["KPER"] = [] - data["TOTIM"] = [] - data["ZONE"] = [] - - zone_header = True - empty = 0 - - elif zone_header: - t = line.split(",") - zones = [int(i.split()[-1]) for i in t[1:] if i not in ("",)] - - for zone in zones: - data["KSTP"].append(kstp) - data["KPER"].append(kper) - data["ZONE"].append(zone) - data["TOTIM"].append(totim) - - zone_header = False - read_data = True - - elif read_data: - - t = line.split(",") - if "IN" in t[1]: - prefix = "FROM_" - - elif "OUT" in t[1]: - prefix = "TO_" - - else: - if "ZONE" in t[0] or "TOTAL" in t[0] or "IN-OUT" in t[0]: - label = "_".join(t[0].split()) - elif "PERCENT ERROR" in line: - label = "_".join(t[0].split()) - read_data = False - else: - label = prefix + "_".join(t[0].split()) - - if label not in data: - data[label] = [] - - for val in t[1:]: - if val in ("",): - continue - - data[label].append(float(val)) - - elif line in ("", " "): - empty += 1 - - else: - pass - - if empty >= 25: - break - - return _zb_dict_to_recarray(data) - - -def _read_zb_csv2(fname, add_prefix=True, aliases=None): - """ - Method to read CSV2 output from zonebudget and CSV output - from Zonebudget6 - - Parameters - ---------- - fname : str - zonebudget output file name - add_prefix : bool - boolean flag to add "TO_", "FROM_" prefixes to column headings - Returns - ------- - np.recarray - """ - with open(fname) as foo: - # read the header and create the dtype - h = foo.readline().upper().strip().split(",") - h = [i.strip() for i in h if i] - dtype = [] - prefix = "FROM_" - for col in h: - col = col.replace("-", "_") - if not add_prefix: - prefix = "" - if col in ("TOTIM", "PERIOD", "STEP", "KSTP", "KPER", "ZONE"): - if col in ("ZONE", "STEP", "KPER", "KSTP", "PERIOD"): - if col == "STEP": - col = "KSTP" - elif col == "PERIOD": - col = "KPER" - dtype.append((col, int)) - - else: - dtype.append((col, float)) - - elif col == "TOTAL IN": - dtype.append(("_".join(col.split()), float)) - prefix = "TO_" - elif col == "TOTAL OUT": - dtype.append(("_".join(col.split()), float)) - prefix = "" - elif col in ("FROM OTHER ZONES", "TO OTHER ZONES"): - dtype.append(("_".join(col.split()), float)) - elif col == "IN_OUT": - dtype.append(("IN-OUT", float)) - else: - dtype.append((prefix + "_".join(col.split()), float)) - - array = np.genfromtxt(foo, delimiter=",").T - if len(array) != len(dtype): - array = array[:-1] - array.shape = (len(dtype), -1) - data = {name[0]: list(array[ix]) for ix, name in enumerate(dtype)} - data["KPER"] = list(np.array(data["KPER"]) - 1) - data["KSTP"] = list(np.array(data["KSTP"]) - 1) - return _zb_dict_to_recarray(data, aliases=aliases) - - -def _zb_dict_to_recarray(data, aliases=None): - """ - 
Method to check the zonebudget dictionary and convert it to a - numpy recarray. - - Parameters - ---------- - data : dict - dictionary of zonebudget data from CSV 1 or ZBLST files - - Returns - ------- - np.recarray - """ - # if steady state is used, storage will not be written - if "FROM_STORAGE" in data: - if len(data["FROM_STORAGE"]) < len(data["ZONE"]): - adj = len(data["ZONE"]) - len(data["FROM_STORAGE"]) - adj = [0] * adj - data["FROM_STORAGE"] = adj + data["FROM_STORAGE"] - data["TO_STORAGE"] = adj + data["TO_STORAGE"] - - zones = list(np.unique(data["ZONE"])) - zone_dtypes = [] - for zn in zones: - if aliases is not None: - if zn in aliases: - zone_dtypes.append((aliases[zn], float)) - else: - zone_dtypes.append((f"ZONE_{int(zn)}", float)) - else: - zone_dtypes.append((f"ZONE_{int(zn)}", float)) - - dtype = [ - ("totim", float), - ("time_step", int), - ("stress_period", int), - ("name", object), - ] + zone_dtypes - - if "TOTIM" not in data: - dtype.pop(0) - - array = [] - allzones = data["ZONE"] - for strt in range(0, len(data["ZONE"]), len(zones)): - end = strt + len(zones) - kstp = data["KSTP"][strt] - kper = data["KPER"][strt] - totim = None - if "TOTIM" in data: - totim = data["TOTIM"][strt] - - for name, values in data.items(): - if name in ("KSTP", "KPER", "TOTIM", "ZONE"): - continue - rec = [kstp, kper, name] - if totim is not None: - rec = [totim] + rec - tmp = values[strt:end] - tzones = allzones[strt:end] - # check zone numbering matches header numbering, if not re-order - if tzones != zones: - idx = [zones.index(z) for z in tzones] - tmp = [tmp[i] for i in idx] - - array.append(tuple(rec + tmp)) - - array = np.array(array, dtype=dtype) - return array.view(type=np.recarray) - - -def _pivot_recarray(recarray): - """ - Method to pivot the zb output recarray to be compatible - with the ZoneBudgetOutput method until the class is deprecated - - Returns - ------- - - """ - dtype = [("totim", float), ("kper", int), ("kstp", int), ("zone", int)] - record_names = np.unique(recarray["name"]) - for rec_name in record_names: - dtype.append((rec_name, float)) - - rnames = recarray.dtype.names - zones = {i: int(i.split("_")[-1]) for i in rnames if i.startswith("ZONE")} - - kstp_kper = np.vstack( - sorted({(rec["time_step"], rec["stress_period"]) for rec in recarray}) - ) - pvt_rec = np.recarray((1,), dtype=dtype) - n = 0 - for kstp, kper in kstp_kper: - idxs = np.where( - (recarray["time_step"] == kstp) - & (recarray["stress_period"] == kper) - ) - if len(idxs) == 0: - pass - else: - temp = recarray[idxs] - for zonename, zone in zones.items(): - if n != 0: - pvt_rec.resize((len(pvt_rec) + 1,), refcheck=False) - pvt_rec["kstp"][-1] = kstp - pvt_rec["kper"][-1] = kper - pvt_rec["zone"][-1] = zone - for rec in temp: - pvt_rec[rec["name"]][-1] = rec[zonename] - - if "totim" in rnames: - pvt_rec["totim"][-1] = temp["totim"][-1] - else: - pvt_rec["totim"][-1] = 0 - - n += 1 - return pvt_rec - - -def _volumetric_flux(recarray, modeltime, extrapolate_kper=False): - """ - Method to generate a volumetric budget table based on flux information - - Parameters - ---------- - recarray : np.recarray - pivoted numpy recarray of zonebudget fluxes - modeltime : flopy.discretization.ModelTime object - flopy modeltime object - extrapolate_kper : bool - flag to determine if we fill in data gaps with other - timestep information from the same stress period. 
- if True, we assume that flux is constant throughout a stress period - and the pandas dataframe returned contains a - volumetric budget per stress period - - if False, calculates volumes from available flux data - - Returns - ------- - pd.DataFrame - - """ - pd = import_optional_dependency( - "pandas", - error_message="ZoneBudget._volumetric_flux() requires pandas.", - ) - - nper = len(modeltime.nstp) - volumetric_data = {} - zones = np.unique(recarray["zone"]) - - for key in recarray.dtype.names: - volumetric_data[key] = [] - - if extrapolate_kper: - volumetric_data.pop("kstp") - perlen = modeltime.perlen - totim = np.add.accumulate(perlen) - for per in range(nper): - idx = np.where(recarray["kper"] == per)[0] - - if len(idx) == 0: - continue - - temp = recarray[idx] - - for zone in zones: - if zone == 0: - continue - - zix = np.where(temp["zone"] == zone)[0] - - if len(zix) == 0: - raise Exception - - for key in recarray.dtype.names: - if key == "totim": - volumetric_data[key].append(totim[per]) - - elif key == "tslen": - volumetric_data["perlen"].append(perlen[per]) - - elif key == "kstp": - continue - - elif key == "kper": - volumetric_data[key].append(per) - - elif key == "zone": - volumetric_data[key].append(zone) - - else: - tmp = np.nanmean(temp[zix][key]) - vol = tmp * perlen[per] - volumetric_data[key].append(vol) - - else: - n = 0 - tslen = {} - dtotim = {} - totim = modeltime.totim - for ix, nstp in enumerate(modeltime.nstp): - for stp in range(nstp): - idx = np.where( - (recarray["kper"] == ix) & (recarray["kstp"] == stp) - ) - if len(idx[0]) == 0: - continue - elif n == 0: - tslen[(stp, ix)] = totim[n] - else: - tslen[(stp, ix)] = totim[n] - totim[n - 1] - dtotim[(stp, ix)] = totim[n] - n += 1 - - ltslen = [tslen[(rec["kstp"], rec["kper"])] for rec in recarray] - if len(np.unique(recarray["totim"])) == 1: - ltotim = [dtotim[(rec["kstp"], rec["kper"])] for rec in recarray] - recarray["totim"] = ltotim - - for name in recarray.dtype.names: - if name in ("zone", "kstp", "kper", "tslen", "totim"): - volumetric_data[name] = recarray[name] - else: - volumetric_data[name] = recarray[name] * ltslen - - return pd.DataFrame.from_dict(volumetric_data) - - -def dataframe_to_netcdf_fmt(df, zone_array, flux=True): - """ - Method to transform a volumetric zonebudget dataframe into - array format for netcdf. - - time is on axis 0 - zone is on axis 1 - - Parameters - ---------- - df : pd.DataFrame - zone_array : np.ndarray - zonebudget zones array - flux : bool - boolean flag to indicate if budget data is a flux "L^3/T" (True, - default) or if the data have been processed to - volumetric values "L^3" (False) - - Returns - ------- - ZBNetOutput object - - """ - zones = np.sort(np.unique(df.zone.values)) - totim = np.sort(np.unique(df.totim.values)) - - data = {} - for col in df.columns: - if col in ("totim", "zone", "kper", "kstp", "perlen"): - pass - else: - data[col] = np.zeros((totim.size, zones.size), dtype=float) - - for i, time in enumerate(totim): - tdf = df.loc[ - df.totim.isin( - [ - time, - ] - ) - ] - tdf = tdf.sort_values(by=["zone"]) - - for col in df.columns: - if col in ("totim", "zone", "kper", "kstp", "perlen"): - pass - else: - data[col][i, :] = tdf[col].values - - return ZBNetOutput(zones, totim, data, zone_array, flux=flux) - - -class ZBNetOutput: - """ - Class that holds zonebudget netcdf output and allows export utilities - to recognize the output data type. 
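    A ZBNetOutput is typically built by dataframe_to_netcdf_fmt() above
    from a pivoted budget dataframe, e.g. (hypothetical objects)::

        df = zb.get_volumetric_budget(modeltime)
        zbnet = dataframe_to_netcdf_fmt(df, zone_array, flux=False)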
- - Parameters - ---------- - zones : np.ndarray - array of zone numbers - time : np.ndarray - array of totim - arrays : dict - dictionary of budget term arrays. - axis 0 is totim, - axis 1 is zones - flux : bool - boolean flag to indicate if budget data is a flux "L^3/T"(True, - default) or if the data have been processed to - volumetric values "L^3" (False) - """ - - def __init__(self, zones, time, arrays, zone_array, flux=True): - self.zones = zones - self.time = time - self.arrays = arrays - self.zone_array = zone_array - self.flux = flux +import os +import copy +import numpy as np +from itertools import groupby +from .utils_def import totim_to_datetime +from . import import_optional_dependency + + +class ZoneBudget: + """ + ZoneBudget class + + Parameters + ---------- + cbc_file : str or CellBudgetFile object + The file name or CellBudgetFile object for which budgets will be + computed. + z : ndarray + The array containing to zones to be used. + kstpkper : tuple of ints + A tuple containing the time step and stress period (kstp, kper). + The kstp and kper values are zero based. + totim : float + The simulation time. + aliases : dict + A dictionary with key, value pairs of zones and aliases. Replaces + the corresponding record and field names with the aliases provided. + When using this option in conjunction with a list of zones, the + zone(s) passed may either be all strings (aliases), all integers, + or mixed. + + Returns + ------- + None + + Examples + -------- + + >>> from flopy.utils.zonbud import ZoneBudget + >>> zon = ZoneBudget.read_zone_file('zone_input_file') + >>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0)) + >>> zb.to_csv('zonebudtest.csv') + >>> zb_mgd = zb * 7.48052 / 1000000 + """ + + def __init__( + self, + cbc_file, + z, + kstpkper=None, + totim=None, + aliases=None, + verbose=False, + **kwargs, + ): + from .binaryfile import CellBudgetFile + + if isinstance(cbc_file, CellBudgetFile): + self.cbc = cbc_file + elif isinstance(cbc_file, str) and os.path.isfile(cbc_file): + self.cbc = CellBudgetFile(cbc_file) + else: + raise Exception(f"Cannot load cell budget file: {cbc_file}.") + + if isinstance(z, np.ndarray): + assert np.issubdtype( + z.dtype, np.integer + ), "Zones dtype must be integer" + else: + e = ( + "Please pass zones as a numpy ndarray of (positive)" + " integers. 
{}".format(z.dtype) + ) + raise Exception(e) + + # Check for negative zone values + if np.any(z < 0): + raise Exception( + "Negative zone value(s) found:", np.unique(z[z < 0]) + ) + + self.dis = None + if "model" in kwargs.keys(): + self.model = kwargs.pop("model") + self.dis = self.model.dis + if "dis" in kwargs.keys(): + self.dis = kwargs.pop("dis") + if len(kwargs.keys()) > 0: + args = ",".join(kwargs.keys()) + raise Exception(f"LayerFile error: unrecognized kwargs: {args}") + + # Check the shape of the cbc budget file arrays + self.cbc_shape = self.cbc.get_data(idx=0, full3D=True)[0].shape + self.nlay, self.nrow, self.ncol = self.cbc_shape + self.cbc_times = self.cbc.get_times() + self.cbc_kstpkper = self.cbc.get_kstpkper() + self.kstpkper = None + self.totim = None + + if kstpkper is not None: + if isinstance(kstpkper, tuple): + kstpkper = [kstpkper] + for kk in kstpkper: + s = f"The specified time step/stress period does not exist {kk}" + assert kk in self.cbc.get_kstpkper(), s + self.kstpkper = kstpkper + elif totim is not None: + if isinstance(totim, float): + totim = [totim] + elif isinstance(totim, int): + totim = [float(totim)] + for t in totim: + s = f"The specified simulation time does not exist {t}" + assert t in self.cbc.get_times(), s + self.totim = totim + else: + # No time step/stress period or simulation time pass + self.kstpkper = self.cbc.get_kstpkper() + + # Set float and integer types + self.float_type = np.float32 + self.int_type = np.int32 + + # Check dimensions of input zone array + s = ( + "Row/col dimensions of zone array {}" + " do not match model row/col dimensions {}".format( + z.shape, self.cbc_shape + ) + ) + assert z.shape[-2] == self.nrow and z.shape[-1] == self.ncol, s + + if z.shape == self.cbc_shape: + izone = z.copy() + elif len(z.shape) == 2: + izone = np.zeros(self.cbc_shape, self.int_type) + izone[:] = z[:, :] + elif len(z.shape) == 3 and z.shape[0] == 1: + izone = np.zeros(self.cbc_shape, self.int_type) + izone[:] = z[0, :, :] + else: + e = f"Shape of the zone array is not recognized: {z.shape}" + raise Exception(e) + + self.izone = izone + self.allzones = np.unique(izone) + self._zonenamedict = {z: f"ZONE_{z}" for z in self.allzones} + + if aliases is not None: + s = ( + "Input aliases not recognized. Please pass a dictionary " + "with key,value pairs of zone/alias." + ) + assert isinstance(aliases, dict), s + # Replace the relevant field names (ignore zone 0) + seen = [] + for z, a in iter(aliases.items()): + if z != 0 and z in self._zonenamedict.keys(): + if z in seen: + raise Exception( + "Zones may not have more than 1 alias." + ) + self._zonenamedict[z] = "_".join(a.split()) + seen.append(z) + + # self._iflow_recnames = self._get_internal_flow_record_names() + + # All record names in the cell-by-cell budget binary file + self.record_names = [ + n.strip() for n in self.cbc.get_unique_record_names(decode=True) + ] + + # Get imeth for each record in the CellBudgetFile record list + self.imeth = {} + for record in self.cbc.recordarray: + self.imeth[record["text"].strip().decode("utf-8")] = record[ + "imeth" + ] + + # INTERNAL FLOW TERMS ARE USED TO CALCULATE FLOW BETWEEN ZONES. + # CONSTANT-HEAD TERMS ARE USED TO IDENTIFY WHERE CONSTANT-HEAD CELLS + # ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF FLOW. + # SWIADDTO--- terms are used by the SWI2 groundwater flow process. 
+ internal_flow_terms = [ + "CONSTANT HEAD", + "FLOW RIGHT FACE", + "FLOW FRONT FACE", + "FLOW LOWER FACE", + "SWIADDTOCH", + "SWIADDTOFRF", + "SWIADDTOFFF", + "SWIADDTOFLF", + ] + + # Source/sink/storage term record names + # These are all of the terms that are not related to constant + # head cells or face flow terms + self.ssst_record_names = [ + n for n in self.record_names if n not in internal_flow_terms + ] + + # Initialize budget recordarray + array_list = [] + if self.kstpkper is not None: + for kk in self.kstpkper: + recordarray = self._initialize_budget_recordarray( + kstpkper=kk, totim=None + ) + array_list.append(recordarray) + elif self.totim is not None: + for t in self.totim: + recordarray = self._initialize_budget_recordarray( + kstpkper=None, totim=t + ) + array_list.append(recordarray) + self._budget = np.concatenate(array_list, axis=0) + + # Update budget record array + if self.kstpkper is not None: + for kk in self.kstpkper: + if verbose: + s = ( + "Computing the budget for" + " time step {} in stress period {}".format( + kk[0] + 1, kk[1] + 1 + ) + ) + print(s) + self._compute_budget(kstpkper=kk) + elif self.totim is not None: + for t in self.totim: + if verbose: + s = f"Computing the budget for time {t}" + print(s) + self._compute_budget(totim=t) + + def _compute_budget(self, kstpkper=None, totim=None): + """ + Creates a budget for the specified zone array. This function only + supports the use of a single time step/stress period or time. + + Parameters + ---------- + kstpkper : tuple + Tuple of kstp and kper to compute budget for (default is None). + totim : float + Totim to compute budget for (default is None). + + Returns + ------- + None + + """ + # Initialize an array to track where the constant head cells + # are located. + ich = np.zeros(self.cbc_shape, self.int_type) + swiich = np.zeros(self.cbc_shape, self.int_type) + + if "CONSTANT HEAD" in self.record_names: + """ + C-----CONSTANT-HEAD FLOW -- DON'T ACCUMULATE THE CELL-BY-CELL VALUES FOR + C-----CONSTANT-HEAD FLOW BECAUSE THEY MAY INCLUDE PARTIALLY CANCELING + C-----INS AND OUTS. USE CONSTANT-HEAD TERM TO IDENTIFY WHERE CONSTANT- + C-----HEAD CELLS ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF + C-----FLOW. STORE CONSTANT-HEAD LOCATIONS IN ICH ARRAY. 
+ """ + chd = self.cbc.get_data( + text="CONSTANT HEAD", + full3D=True, + kstpkper=kstpkper, + totim=totim, + )[0] + ich[np.ma.where(chd != 0.0)] = 1 + if "FLOW RIGHT FACE" in self.record_names: + self._accumulate_flow_frf("FLOW RIGHT FACE", ich, kstpkper, totim) + if "FLOW FRONT FACE" in self.record_names: + self._accumulate_flow_fff("FLOW FRONT FACE", ich, kstpkper, totim) + if "FLOW LOWER FACE" in self.record_names: + self._accumulate_flow_flf("FLOW LOWER FACE", ich, kstpkper, totim) + if "SWIADDTOCH" in self.record_names: + swichd = self.cbc.get_data( + text="SWIADDTOCH", full3D=True, kstpkper=kstpkper, totim=totim + )[0] + swiich[swichd != 0] = 1 + if "SWIADDTOFRF" in self.record_names: + self._accumulate_flow_frf("SWIADDTOFRF", swiich, kstpkper, totim) + if "SWIADDTOFFF" in self.record_names: + self._accumulate_flow_fff("SWIADDTOFFF", swiich, kstpkper, totim) + if "SWIADDTOFLF" in self.record_names: + self._accumulate_flow_flf("SWIADDTOFLF", swiich, kstpkper, totim) + + # NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE + # ACCUMULATE THE FLOW BY ZONE + # iterate over remaining items in the list + for recname in self.ssst_record_names: + self._accumulate_flow_ssst(recname, kstpkper, totim) + + # Compute mass balance terms + self._compute_mass_balance(kstpkper, totim) + + return + + def _add_empty_record( + self, recordarray, recname, kstpkper=None, totim=None + ): + """ + Build an empty records based on the specified flow direction and + record name for the given list of zones. + + Parameters + ---------- + recordarray : + recname : + kstpkper : tuple + Tuple of kstp and kper to compute budget for (default is None). + totim : float + Totim to compute budget for (default is None). + + Returns + ------- + recordarray : np.recarray + + """ + if kstpkper is not None: + if len(self.cbc_times) > 0: + totim = self.cbc_times[self.cbc_kstpkper.index(kstpkper)] + else: + totim = 0.0 + elif totim is not None: + if len(self.cbc_times) > 0: + kstpkper = self.cbc_kstpkper[self.cbc_times.index(totim)] + else: + kstpkper = (0, 0) + + row = [totim, kstpkper[0], kstpkper[1], recname] + row += [0.0 for _ in self._zonenamedict.values()] + recs = np.array(tuple(row), dtype=recordarray.dtype) + recordarray = np.append(recordarray, recs) + return recordarray + + def _initialize_budget_recordarray(self, kstpkper=None, totim=None): + """ + Initialize the budget record array which will store all of the + fluxes in the cell-budget file. + + Parameters + ---------- + kstpkper : tuple + Tuple of kstp and kper to compute budget for (default is None). + totim : float + Totim to compute budget for (default is None). + + Returns + ------- + + """ + + # Create empty array for the budget terms. + dtype_list = [ + ("totim", "= 2: + data = self.cbc.get_data( + text=recname, kstpkper=kstpkper, totim=totim + )[0] + + # "FLOW RIGHT FACE" COMPUTE FLOW BETWEEN ZONES ACROSS COLUMNS. + # COMPUTE FLOW ONLY BETWEEN A ZONE AND A HIGHER ZONE -- FLOW FROM + # ZONE 4 TO 3 IS THE NEGATIVE OF FLOW FROM 3 TO 4. 
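                # (Illustration, hypothetical single row: with izone columns
                # [1, 1, 2, 2] only the face between columns 1 and 2 is
                # flagged below; a positive FLOW RIGHT FACE value there is
                # tallied as zone 1 -> zone 2, a negative value as
                # zone 2 -> zone 1.)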
+ # 1ST, CALCULATE FLOW BETWEEN NODE J,I,K AND J-1,I,K + + k, i, j = np.where( + self.izone[:, :, 1:] > self.izone[:, :, :-1] + ) + + # Adjust column values to account for the starting position of "nz" + j += 1 + + # Define the zone to which flow is going + nz = self.izone[k, i, j] + + # Define the zone from which flow is coming + jl = j - 1 + nzl = self.izone[k, i, jl] + + # Get the face flow + q = data[k, i, jl] + + # Get indices where flow face values are positive (flow out of higher zone) + # Don't include CH to CH flow (can occur if CHTOCH option is used) + # Create an iterable tuple of (from zone, to zone, flux) + # Then group tuple by (from_zone, to_zone) and sum the flux values + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + # Get indices where flow face values are negative (flow into higher zone) + # Don't include CH to CH flow (can occur if CHTOCH option is used) + # Create an iterable tuple of (from zone, to zone, flux) + # Then group tuple by (from_zone, to_zone) and sum the flux values + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + # FLOW BETWEEN NODE J,I,K AND J+1,I,K + k, i, j = np.where( + self.izone[:, :, :-1] > self.izone[:, :, 1:] + ) + + # Define the zone from which flow is coming + nz = self.izone[k, i, j] + + # Define the zone to which flow is going + jr = j + 1 + nzr = self.izone[k, i, jr] + + # Get the face flow + q = data[k, i, j] + + # Get indices where flow face values are positive (flow out of higher zone) + # Don't include CH to CH flow (can occur if CHTOCH option is used) + # Create an iterable tuple of (from zone, to zone, flux) + # Then group tuple by (from_zone, to_zone) and sum the flux values + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + # Get indices where flow face values are negative (flow into higher zone) + # Don't include CH to CH flow (can occur if CHTOCH option is used) + # Create an iterable tuple of (from zone, to zone, flux) + # Then group tuple by (from_zone, to_zone) and sum the flux values + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION + k, i, j = np.where(ich == 1) + k, i, j = k[j > 0], i[j > 0], j[j > 0] + jl = j - 1 + nzl = self.izone[k, i, jl] + nz = self.izone[k, i, j] + q = data[k, i, jl] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) + fz = ["TO_CONSTANT_HEAD"] * len(tzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) + fz = ["FROM_CONSTANT_HEAD"] * len(fzi) + tz = [self._zonenamedict[z] for z in tzi[tzi != 0]] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + k, i, 
j = np.where(ich == 1) + k, i, j = ( + k[j < self.ncol - 1], + i[j < self.ncol - 1], + j[j < self.ncol - 1], + ) + nz = self.izone[k, i, j] + jr = j + 1 + nzr = self.izone[k, i, jr] + q = data[k, i, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) + fz = ["FROM_CONSTANT_HEAD"] * len(tzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) + fz = ["TO_CONSTANT_HEAD"] * len(fzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + except Exception as e: + print(e) + raise + return + + def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): + """ + + Parameters + ---------- + recname + ich + kstpkper + totim + + Returns + ------- + + """ + try: + if self.nrow >= 2: + data = self.cbc.get_data( + text=recname, kstpkper=kstpkper, totim=totim + )[0] + + # "FLOW FRONT FACE" + # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K + k, i, j = np.where( + self.izone[:, 1:, :] < self.izone[:, :-1, :] + ) + i += 1 + ia = i - 1 + nza = self.izone[k, ia, j] + nz = self.izone[k, i, j] + q = data[k, ia, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K. 
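                # (this pass mirrors the J+1 column pass in
                # _accumulate_flow_frf, with rows i taking the place of
                # columns j)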
+ k, i, j = np.where( + self.izone[:, :-1, :] < self.izone[:, 1:, :] + ) + nz = self.izone[k, i, j] + ib = i + 1 + nzb = self.izone[k, ib, j] + q = data[k, i, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION + k, i, j = np.where(ich == 1) + k, i, j = k[i > 0], i[i > 0], j[i > 0] + ia = i - 1 + nza = self.izone[k, ia, j] + nz = self.izone[k, i, j] + q = data[k, ia, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) + fz = ["TO_CONSTANT_HEAD"] * len(tzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) + fz = ["FROM_CONSTANT_HEAD"] * len(fzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + k, i, j = np.where(ich == 1) + k, i, j = ( + k[i < self.nrow - 1], + i[i < self.nrow - 1], + j[i < self.nrow - 1], + ) + nz = self.izone[k, i, j] + ib = i + 1 + nzb = self.izone[k, ib, j] + q = data[k, i, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) + fz = ["FROM_CONSTANT_HEAD"] * len(tzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) + fz = ["TO_CONSTANT_HEAD"] * len(fzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + except Exception as e: + print(e) + raise + return + + def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): + """ + + Parameters + ---------- + recname + ich + kstpkper + totim + + Returns + ------- + + """ + try: + if self.nlay >= 2: + data = self.cbc.get_data( + text=recname, kstpkper=kstpkper, totim=totim + )[0] + + # "FLOW LOWER FACE" + # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1 + k, i, j = np.where( + self.izone[1:, :, :] < self.izone[:-1, :, :] + ) + k += 1 + ka = k - 1 + nza = self.izone[ka, i, j] + nz = self.izone[k, i, j] + q = data[ka, i, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1 + k, i, j = np.where( + self.izone[:-1, :, :] < self.izone[1:, :, :] + ) + nz = self.izone[k, i, j] + kb = k + 1 + nzb = self.izone[kb, i, j] + q = data[k, i, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) + ) + fzi, tzi, fi = 
sum_flux_tuples(nz[idx], nzb[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) + ) + fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) + self._update_budget_fromfaceflow( + fzi, tzi, np.abs(fi), kstpkper, totim + ) + + # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION + k, i, j = np.where(ich == 1) + k, i, j = k[k > 0], i[k > 0], j[k > 0] + ka = k - 1 + nza = self.izone[ka, i, j] + nz = self.izone[k, i, j] + q = data[ka, i, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) + fz = ["TO_CONSTANT_HEAD"] * len(tzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) + fz = ["FROM_CONSTANT_HEAD"] * len(fzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + k, i, j = np.where(ich == 1) + k, i, j = ( + k[k < self.nlay - 1], + i[k < self.nlay - 1], + j[k < self.nlay - 1], + ) + nz = self.izone[k, i, j] + kb = k + 1 + nzb = self.izone[kb, i, j] + q = data[k, i, j] + idx = np.where( + (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) + fz = ["FROM_CONSTANT_HEAD"] * len(tzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + idx = np.where( + (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) + ) + fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) + fz = ["TO_CONSTANT_HEAD"] * len(fzi) + tz = [self._zonenamedict[z] for z in tzi] + self._update_budget_fromssst( + fz, tz, np.abs(f), kstpkper, totim + ) + + except Exception as e: + print(e) + raise + return + + def _accumulate_flow_ssst(self, recname, kstpkper, totim): + + # NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE + # ACCUMULATE THE FLOW BY ZONE + + imeth = self.imeth[recname] + + data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim) + if len(data) == 0: + # Empty data, can occur during the first time step of a transient + # model when storage terms are zero and not in the cell-budget + # file. 
+ return + else: + data = data[0] + + if imeth == 2 or imeth == 5: + # LIST + qin = np.ma.zeros( + (self.nlay * self.nrow * self.ncol), self.float_type + ) + qout = np.ma.zeros( + (self.nlay * self.nrow * self.ncol), self.float_type + ) + for [node, q] in zip(data["node"], data["q"]): + idx = node - 1 + if q > 0: + qin.data[idx] += q + elif q < 0: + qout.data[idx] += q + qin = np.ma.reshape(qin, (self.nlay, self.nrow, self.ncol)) + qout = np.ma.reshape(qout, (self.nlay, self.nrow, self.ncol)) + elif imeth == 0 or imeth == 1: + # FULL 3-D ARRAY + qin = np.ma.zeros(self.cbc_shape, self.float_type) + qout = np.ma.zeros(self.cbc_shape, self.float_type) + qin[data > 0] = data[data > 0] + qout[data < 0] = data[data < 0] + elif imeth == 3: + # 1-LAYER ARRAY WITH LAYER INDICATOR ARRAY + rlay, rdata = data[0], data[1] + data = np.ma.zeros(self.cbc_shape, self.float_type) + for (r, c), l in np.ndenumerate(rlay): + data[l - 1, r, c] = rdata[r, c] + qin = np.ma.zeros(self.cbc_shape, self.float_type) + qout = np.ma.zeros(self.cbc_shape, self.float_type) + qin[data > 0] = data[data > 0] + qout[data < 0] = data[data < 0] + elif imeth == 4: + # 1-LAYER ARRAY THAT DEFINES LAYER 1 + qin = np.ma.zeros(self.cbc_shape, self.float_type) + qout = np.ma.zeros(self.cbc_shape, self.float_type) + r, c = np.where(data > 0) + qin[0, r, c] = data[r, c] + r, c = np.where(data < 0) + qout[0, r, c] = data[r, c] + else: + # Should not happen + raise Exception( + f'Unrecognized "imeth" for {recname} record: {imeth}' + ) + + # Inflows + fz = [] + tz = [] + f = [] + for z in self.allzones: + if z != 0: + flux = qin[(self.izone == z)].sum() + if type(flux) == np.ma.core.MaskedConstant: + flux = 0.0 + fz.append("FROM_" + "_".join(recname.split())) + tz.append(self._zonenamedict[z]) + f.append(flux) + fz = np.array(fz) + tz = np.array(tz) + f = np.array(f) + self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim) + + # Outflows + fz = [] + tz = [] + f = [] + for z in self.allzones: + if z != 0: + flux = qout[(self.izone == z)].sum() + if type(flux) == np.ma.core.MaskedConstant: + flux = 0.0 + fz.append("TO_" + "_".join(recname.split())) + tz.append(self._zonenamedict[z]) + f.append(flux) + fz = np.array(fz) + tz = np.array(tz) + f = np.array(f) + self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim) + + def _compute_mass_balance(self, kstpkper, totim): + # Returns a record array with total inflow, total outflow, + # and percent error summed by column. 
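        # (Illustration: intot = 100.0 and outot = 98.0 for a zone give
        # 100 * (100.0 - 98.0) / ((100.0 + 98.0) / 2.0) ~= 2.02 percent
        # discrepancy.)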
+ skipcols = ["time_step", "stress_period", "totim", "name"] + + # Compute inflows + recnames = self.get_record_names() + innames = [n for n in recnames if n.startswith("FROM_")] + outnames = [n for n in recnames if n.startswith("TO_")] + if kstpkper is not None: + rowidx = np.where( + (self._budget["time_step"] == kstpkper[0]) + & (self._budget["stress_period"] == kstpkper[1]) + & np.in1d(self._budget["name"], innames) + ) + elif totim is not None: + rowidx = np.where( + (self._budget["totim"] == totim) + & np.in1d(self._budget["name"], innames) + ) + a = _numpyvoid2numeric( + self._budget[list(self._zonenamedict.values())][rowidx] + ) + intot = np.array(a.sum(axis=0)) + tz = np.array( + list([n for n in self._budget.dtype.names if n not in skipcols]) + ) + fz = np.array(["TOTAL_IN"] * len(tz)) + self._update_budget_fromssst(fz, tz, intot, kstpkper, totim) + + # Compute outflows + if kstpkper is not None: + rowidx = np.where( + (self._budget["time_step"] == kstpkper[0]) + & (self._budget["stress_period"] == kstpkper[1]) + & np.in1d(self._budget["name"], outnames) + ) + elif totim is not None: + rowidx = np.where( + (self._budget["totim"] == totim) + & np.in1d(self._budget["name"], outnames) + ) + a = _numpyvoid2numeric( + self._budget[list(self._zonenamedict.values())][rowidx] + ) + outot = np.array(a.sum(axis=0)) + tz = np.array( + list([n for n in self._budget.dtype.names if n not in skipcols]) + ) + fz = np.array(["TOTAL_OUT"] * len(tz)) + self._update_budget_fromssst(fz, tz, outot, kstpkper, totim) + + # Compute IN-OUT + tz = np.array( + list([n for n in self._budget.dtype.names if n not in skipcols]) + ) + f = intot - outot + fz = np.array(["IN-OUT"] * len(tz)) + self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim) + + # Compute percent discrepancy + tz = np.array( + list([n for n in self._budget.dtype.names if n not in skipcols]) + ) + fz = np.array(["PERCENT_DISCREPANCY"] * len(tz)) + in_minus_out = intot - outot + in_plus_out = intot + outot + f = 100 * in_minus_out / (in_plus_out / 2.0) + self._update_budget_fromssst(fz, tz, np.abs(f), kstpkper, totim) + + def get_model_shape(self): + """Get model shape + + Returns + ------- + nlay : int + Number of layers + nrow : int + Number of rows + ncol : int + Number of columns + + """ + return self.nlay, self.nrow, self.ncol + + def get_record_names(self, stripped=False): + """ + Get a list of water budget record names in the file. + + Returns + ------- + out : list of strings + List of unique text names in the binary file. + + Examples + -------- + + >>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0)) + >>> recnames = zb.get_record_names() + + """ + return _get_record_names(self._budget, stripped=stripped) + + def get_budget(self, names=None, zones=None, net=False, pivot=False): + """ + Get a list of zonebudget record arrays. + + Parameters + ---------- + + names : list of strings + A list of strings containing the names of the records desired. + zones : list of ints or strings + A list of integer zone numbers or zone names desired. + net : boolean + If True, returns net IN-OUT for each record. + pivot : boolean + If True, returns data in a more user friendly format + + Returns + ------- + budget_list : list of record arrays + A list of the zonebudget record arrays. 
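        When ``pivot=True`` the recarray is reshaped by _pivot_recarray()
        so that each row is a (kstp, kper, zone) combination and each
        budget term becomes its own column.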
+ + Examples + -------- + + >>> names = ['FROM_CONSTANT_HEAD', 'RIVER_LEAKAGE_OUT'] + >>> zones = ['ZONE_1', 'ZONE_2'] + >>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0)) + >>> bud = zb.get_budget(names=names, zones=zones) + + """ + recarray = _get_budget( + self._budget, self._zonenamedict, names=names, zones=zones, net=net + ) + + if pivot: + recarray = _pivot_recarray(recarray) + + return recarray + + def get_volumetric_budget( + self, modeltime, recarray=None, extrapolate_kper=False + ): + """ + Method to generate a volumetric budget table based on flux information + + Parameters + ---------- + modeltime : flopy.discretization.ModelTime object + ModelTime object for calculating volumes + recarray : np.recarray + optional, user can pass in a numpy recarray to calculate volumetric + budget. recarray must be pivoted before passing to + get_volumetric_budget + extrapolate_kper : bool + flag to determine if we fill in data gaps with other + timestep information from the same stress period. + if True, we assume that flux is constant throughout a stress period + and the pandas dataframe returned contains a + volumetric budget per stress period + + if False, calculates volumes from available flux data + + Returns + ------- + pd.DataFrame + + """ + if recarray is None: + recarray = self.get_budget(pivot=True) + return _volumetric_flux(recarray, modeltime, extrapolate_kper) + + def to_csv(self, fname): + """ + Saves the budget record arrays to a formatted + comma-separated values file. + + Parameters + ---------- + fname : str + The name of the output comma-separated values file. + + Returns + ------- + None + + """ + # Needs updating to handle the new budget list structure. Write out + # budgets for all kstpkper if kstpkper is None or pass list of + # kstpkper/totim to save particular budgets. + with open(fname, "w") as f: + # Write header + f.write(",".join(self._budget.dtype.names) + "\n") + # Write rows + for rowidx in range(self._budget.shape[0]): + s = ( + ",".join([str(i) for i in list(self._budget[:][rowidx])]) + + "\n" + ) + f.write(s) + return + + def get_dataframes( + self, + start_datetime=None, + timeunit="D", + index_key="totim", + names=None, + zones=None, + net=False, + pivot=False, + ): + """ + Get pandas dataframes. + + Parameters + ---------- + + start_datetime : str + Datetime string indicating the time at which the simulation starts. + timeunit : str + String that indicates the time units used in the model. + index_key : str + Indicates the fields to be used (in addition to "record") in the + resulting DataFrame multi-index. + names : list of strings + A list of strings containing the names of the records desired. + zones : list of ints or strings + A list of integer zone numbers or zone names desired. + net : boolean + If True, returns net IN-OUT for each record. + pivot : bool + If True, returns dataframe in a more user friendly format + + Returns + ------- + df : Pandas DataFrame + Pandas DataFrame with the budget information. 
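        Unless ``pivot=True``, the frame is indexed on ("totim", "name")
        or ("time_step", "stress_period", "name"), depending on
        ``index_key`` and on whether totim is available (see
        _recarray_to_dataframe()).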
+ + Examples + -------- + >>> from flopy.utils.zonbud import ZoneBudget + >>> zon = ZoneBudget.read_zone_file('zone_input_file') + >>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0)) + >>> df = zb.get_dataframes() + + """ + recarray = self.get_budget(names, zones, net, pivot=pivot) + return _recarray_to_dataframe( + recarray, + self._zonenamedict, + start_datetime=start_datetime, + timeunit=timeunit, + index_key=index_key, + zones=zones, + pivot=pivot, + ) + + @classmethod + def _get_otype(cls, fname): + """ + Method to automatically distinguish output type based on the + zonebudget header + + Parameters + ---------- + fname : str + zonebudget output file name + + Returns + ------- + otype : int + + """ + with open(fname) as foo: + line = foo.readline() + if "zonebudget version" in line.lower(): + otype = 0 + elif "time step" in line.lower(): + otype = 1 + elif "totim" in line.lower(): + otype = 2 + else: + raise AssertionError("Cant distinguish output type") + return otype + + @classmethod + def read_output(cls, fname, net=False, dataframe=False, **kwargs): + """ + Method to read a zonebudget output file into a recarray or pandas + dataframe + + Parameters + ---------- + fname : str + zonebudget output file name + net : bool + boolean flag for net budget + dataframe : bool + boolean flag to return a pandas dataframe + + **kwargs + pivot : bool + + start_datetime : str + Datetime string indicating the time at which the simulation + starts. Can be used when pandas dataframe is requested + timeunit : str + String that indicates the time units used in the model. + + + Returns + ------- + np.recarray + """ + otype = ZoneBudget._get_otype(fname) + if otype == 0: + recarray = _read_zb_zblst(fname) + elif otype == 1: + recarray = _read_zb_csv(fname) + else: + add_prefix = kwargs.pop("add_prefix", True) + recarray = _read_zb_csv2(fname, add_prefix=add_prefix) + + zonenamdict = { + int(i.split("_")[-1]): i + for i in recarray.dtype.names + if i.startswith("ZONE") + } + pivot = kwargs.pop("pivot", False) + recarray = _get_budget(recarray, zonenamdict, net=net) + if pivot: + recarray = _pivot_recarray(recarray) + + if not dataframe: + return recarray + else: + start_datetime = kwargs.pop("start_datetime", None) + timeunit = kwargs.pop("timeunit", "D") + return _recarray_to_dataframe( + recarray, + zonenamdict, + start_datetime=start_datetime, + timeunit=timeunit, + pivot=pivot, + ) + + @classmethod + def read_zone_file(cls, fname): + """Method to read a zonebudget zone file into memory + + Parameters + ---------- + fname : str + zone file name + + Returns + ------- + zones : np.array + + """ + with open(fname, "r") as f: + lines = f.readlines() + + # Initialize layer + lay = 0 + + # Initialize data counter + totlen = 0 + i = 0 + + # First line contains array dimensions + dimstring = lines.pop(0).strip().split() + nlay, nrow, ncol = [int(v) for v in dimstring] + zones = np.zeros((nlay, nrow, ncol), dtype=np.int32) + + # The number of values to read before placing + # them into the zone array + datalen = nrow * ncol + + # List of valid values for LOCAT + locats = ["CONSTANT", "INTERNAL", "EXTERNAL"] + + # ITERATE OVER THE ROWS + for line in lines: + rowitems = line.strip().split() + + # Skip blank lines + if len(rowitems) == 0: + continue + + # HEADER + if rowitems[0].upper() in locats: + vals = [] + locat = rowitems[0].upper() + + if locat == "CONSTANT": + iconst = int(rowitems[1]) + else: + fmt = rowitems[1].strip("()") + fmtin, iprn = [int(v) for v in fmt.split("I")] + + # ZONE DATA + else: 
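                # Sketch of the layout handled below (hypothetical
                # 1-layer, 2 x 4 model):
                #
                #     1 2 4
                #     INTERNAL (4I2)
                #      1 1 2 2
                #      1 1 2 2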
+ if locat == "CONSTANT": + vals = np.ones((nrow, ncol), dtype=int) * iconst + lay += 1 + elif locat == "INTERNAL": + # READ ZONES + rowvals = [int(v) for v in rowitems] + s = "Too many values encountered on this line." + assert len(rowvals) <= fmtin, s + vals.extend(rowvals) + + elif locat == "EXTERNAL": + # READ EXTERNAL FILE + fname = rowitems[0] + if not os.path.isfile(fname): + errmsg = f'Could not find external file "{fname}"' + raise Exception(errmsg) + with open(fname, "r") as ext_f: + ext_flines = ext_f.readlines() + for ext_frow in ext_flines: + ext_frowitems = ext_frow.strip().split() + rowvals = [int(v) for v in ext_frowitems] + vals.extend(rowvals) + if len(vals) != datalen: + errmsg = ( + "The number of values read from external " + 'file "{}" does not match the expected ' + "number.".format(len(vals)) + ) + raise Exception(errmsg) + else: + # Should not get here + raise Exception(f"Locat not recognized: {locat}") + + # IGNORE COMPOSITE ZONES + + if len(vals) == datalen: + # place values for the previous layer into the zone array + vals = np.array(vals, dtype=int).reshape((nrow, ncol)) + zones[lay, :, :] = vals[:, :] + lay += 1 + totlen += len(rowitems) + i += 1 + s = ( + "The number of values read ({:,.0f})" + " does not match the number expected" + " ({:,.0f})".format(totlen, nlay * nrow * ncol) + ) + assert totlen == nlay * nrow * ncol, s + return zones + + @classmethod + def write_zone_file(cls, fname, array, fmtin=None, iprn=None): + """ + Saves a numpy array in a format readable by the zonebudget program + executable. + + File format: + line 1: nlay, nrow, ncol + line 2: INTERNAL (format) + line 3: begin data + . + . + . + + example from NACP: + 19 250 500 + INTERNAL (10I7) + 199 199 199 199 199 199 199 199 199 + 199 199 199 199 199 199 199 199 199 + ... + INTERNAL (10I7) + 199 199 199 199 199 199 199 199 199 + 199 199 199 199 199 199 199 199 199 + ... + + Parameters + ---------- + array : array + The array of zones to be written. + fname : str + The path and name of the file to be written. + fmtin : int + The number of values to write to each line. + iprn : int + Padding space to add between each value. + + Returns + ------- + + """ + if len(array.shape) == 2: + b = np.zeros((1, array.shape[0], array.shape[1]), dtype=np.int32) + b[0, :, :] = array[:, :] + array = b.copy() + elif len(array.shape) < 2 or len(array.shape) > 3: + raise Exception( + f"Shape of the input array is not recognized: {array.shape}" + ) + if np.ma.is_masked(array): + array = np.ma.filled(array, 0) + + nlay, nrow, ncol = array.shape + + if fmtin is not None: + assert fmtin <= ncol, ( + "The specified width is greater than the " + "number of columns in the array." 
+ ) + else: + fmtin = ncol + + iprnmin = len(str(array.max())) + if iprn is None or iprn <= iprnmin: + iprn = iprnmin + 1 + + formatter_str = f"{{:>{iprn}}}" + formatter = formatter_str.format + + with open(fname, "w") as f: + header = f"{nlay} {nrow} {ncol}\n" + f.write(header) + for lay in range(nlay): + record_2 = f"INTERNAL\t({fmtin}I{iprn})\n" + f.write(record_2) + if fmtin < ncol: + for row in range(nrow): + rowvals = array[lay, row, :].ravel() + start = 0 + end = start + fmtin + vals = rowvals[start:end] + while len(vals) > 0: + s = ( + "".join([formatter(int(val)) for val in vals]) + + "\n" + ) + f.write(s) + start = end + end = start + fmtin + vals = rowvals[start:end] + + elif fmtin == ncol: + for row in range(nrow): + vals = array[lay, row, :].ravel() + f.write( + "".join([formatter(int(val)) for val in vals]) + + "\n" + ) + + def copy(self): + """ + Return a deepcopy of the object. + """ + return copy.deepcopy(self) + + def export(self, f, ml, **kwargs): + """ + Method to export a netcdf file, or add zonebudget output to + an open netcdf file instance + + Parameters + ---------- + f : str or flopy.export.netcdf.NetCdf object + ml : flopy.modflow.Modflow or flopy.mf6.ModflowGwf object + **kwargs : + logger : flopy.export.netcdf.Logger instance + masked_vals : list + list of values to mask + + Returns + ------- + flopy.export.netcdf.NetCdf object + + """ + from ..export.utils import output_helper + + if isinstance(f, str): + if not f.endswith(".nc"): + raise AssertionError( + "File extension must end with .nc to " + "export a netcdf file" + ) + + zbncfobj = dataframe_to_netcdf_fmt( + self.get_dataframes(pivot=True), self.izone, flux=True + ) + oudic = {"zbud": zbncfobj} + return output_helper(f, ml, oudic, **kwargs) + + def __deepcopy__(self, memo): + """ + Over-rides the default deepcopy behavior. Copy all attributes except + the CellBudgetFile object which does not copy nicely. + """ + cls = self.__class__ + result = cls.__new__(cls) + memo[id(self)] = result + ignore_attrs = ["cbc"] + for k, v in self.__dict__.items(): + if k not in ignore_attrs: + setattr(result, k, copy.deepcopy(v, memo)) + + # Set CellBudgetFile object attribute manually. This is object + # read-only so should not be problems with pointers from + # multiple objects. 
+        result.cbc = self.cbc
+        return result
+
+    def __mul__(self, other):
+        newbud = self._budget.copy()
+        for f in self._zonenamedict.values():
+            newbud[f] = np.array([r for r in newbud[f]]) * other
+        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        newbud[:][idx] = self._budget[:][idx]
+        newobj = self.copy()
+        newobj._budget = newbud
+        return newobj
+
+    def __truediv__(self, other):
+        newbud = self._budget.copy()
+        for f in self._zonenamedict.values():
+            newbud[f] = np.array([r for r in newbud[f]]) / float(other)
+        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        newbud[:][idx] = self._budget[:][idx]
+        newobj = self.copy()
+        newobj._budget = newbud
+        return newobj
+
+    def __div__(self, other):
+        newbud = self._budget.copy()
+        for f in self._zonenamedict.values():
+            newbud[f] = np.array([r for r in newbud[f]]) / float(other)
+        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        newbud[:][idx] = self._budget[:][idx]
+        newobj = self.copy()
+        newobj._budget = newbud
+        return newobj
+
+    def __add__(self, other):
+        newbud = self._budget.copy()
+        for f in self._zonenamedict.values():
+            newbud[f] = np.array([r for r in newbud[f]]) + other
+        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        newbud[:][idx] = self._budget[:][idx]
+        newobj = self.copy()
+        newobj._budget = newbud
+        return newobj
+
+    def __sub__(self, other):
+        newbud = self._budget.copy()
+        for f in self._zonenamedict.values():
+            newbud[f] = np.array([r for r in newbud[f]]) - other
+        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        newbud[:][idx] = self._budget[:][idx]
+        newobj = self.copy()
+        newobj._budget = newbud
+        return newobj
+
+
+class ZoneBudget6:
+    """
+    Model class for building, editing and running MODFLOW 6 zonebudget
+
+    Parameters
+    ----------
+    name : str
+        model name for zonebudget
+    model_ws : str
+        path to model
+    exe_name : str
+        executable name
+    extension : str
+        name file extension
+    """
+
+    def __init__(
+        self,
+        name="zonebud",
+        model_ws=".",
+        exe_name="zbud6",
+        extension=".zbnam",
+    ):
+        from ..mf6.utils import MfGrdFile
+        from .binaryfile import CellBudgetFile
+
+        self._name = name
+        self._zon = None
+        self._grb = None
+        self._bud = None
+        self._model_ws = model_ws
+        self._exe_name = exe_name
+
+        if not extension.startswith("."):
+            extension = "."
+ extension + + self._extension = extension + self.zbnam_packages = { + "zon": ZoneFile6, + "bud": CellBudgetFile, + "grb": MfGrdFile, + } + self.package_dict = {} + if self._zon is not None: + self.package_dict["zon"] = self._zon + if self._grb is not None: + self.package_dict["grb"] = self._grb + if self._bud is not None: + self.package_dict["bud"] = self._bud + + self._recarray = None + + def run_model(self, exe_name=None, nam_file=None, silent=False): + """ + Method to run a zonebudget model + + Parameters + ---------- + exe_name : str + optional zonebudget executable name + nam_file : str + optional zonebudget name file name + silent : bool + optional flag to silence output + + Returns + ------- + tuple + """ + from ..mbase import run_model + + if exe_name is None: + exe_name = self._exe_name + if nam_file is None: + nam_file = os.path.join(self._name + self._extension) + return run_model( + exe_name, nam_file, model_ws=self._model_ws, silent=silent + ) + + def __setattr__(self, key, value): + if key in ("zon", "bud", "grb", "cbc"): + self.add_package(key, value) + return + elif key == "model_ws": + raise AttributeError("please use change_model_ws() method") + elif key == "name": + self.change_model_name(value) + super().__setattr__(key, value) + + def __getattr__(self, item): + if item in ("zon", "bud", "grb", "name", "model_ws"): + item = f"_{item}" + return super().__getattribute__(item) + + def add_package(self, pkg_name, pkg): + """ + Method to add a package to the ZoneBudget6 object + + Parameters + ---------- + pkg_name : str + three letter package abbreviation + pkg : str or object + either a package file name or package object + + """ + pkg_name = pkg_name.lower() + if pkg_name not in self.zbnam_packages: + if pkg_name == "cbc": + pkg_name = "bud" + else: + raise KeyError( + f"{pkg_name} package is not valid for zonebudget" + ) + + if isinstance(pkg, str): + if os.path.exists(os.path.join(self._model_ws, pkg)): + pkg = os.path.join(self._model_ws, pkg) + + func = self.zbnam_packages[pkg_name] + if pkg_name in ("bud", "grb"): + pkg = func(pkg, precision="double") + else: + pkg = func.load(pkg, self) + + else: + pass + + pkg_name = f"_{pkg_name}" + self.__setattr__(pkg_name, pkg) + if pkg is not None: + self.package_dict[pkg_name[1:]] = pkg + + def change_model_ws(self, model_ws): + """ + Method to change the model ws for writing a zonebudget + model. + + Parameters + ---------- + model_ws : str + new model directory + + """ + self._model_ws = model_ws + + def change_model_name(self, name): + """ + Method to change the model name for writing a zonebudget + model. + + Parameters + ---------- + name : str + new model name + + """ + self._name = name + if self._zon is not None: + self._zon.filename = f"{name}.{self._zon.filename.split('.')[-1]}" + + def get_dataframes( + self, + start_datetime=None, + timeunit="D", + index_key="totim", + names=None, + zones=None, + net=False, + pivot=False, + ): + """ + Get pandas dataframes. + + Parameters + ---------- + + start_datetime : str + Datetime string indicating the time at which the simulation starts. + timeunit : str + String that indicates the time units used in the model. + index_key : str + Indicates the fields to be used (in addition to "record") in the + resulting DataFrame multi-index. + names : list of strings + A list of strings containing the names of the records desired. + zones : list of ints or strings + A list of integer zone numbers or zone names desired. + net : boolean + If True, returns net IN-OUT for each record. 
+        pivot : bool
+            If True, returns the data pivoted into a more user-friendly
+            format
+
+        Returns
+        -------
+        df : Pandas DataFrame
+            Pandas DataFrame with the budget information.
+
+        Examples
+        --------
+        >>> from flopy.utils.zonbud import ZoneBudget6
+        >>> zb6 = ZoneBudget6.load("my_nam_file", model_ws="my_model_ws")
+        >>> zb6.run_model()
+        >>> df = zb6.get_dataframes()
+
+        """
+        recarray = self.get_budget(
+            names=names, zones=zones, net=net, pivot=pivot
+        )
+
+        return _recarray_to_dataframe(
+            recarray,
+            self._zon._zonenamedict,
+            start_datetime=start_datetime,
+            timeunit=timeunit,
+            index_key=index_key,
+            zones=zones,
+            pivot=pivot,
+        )
+
+    def get_budget(
+        self, f=None, names=None, zones=None, net=False, pivot=False
+    ):
+        """
+        Method to read zonebudget output and return it as a record array
+
+        Parameters
+        ----------
+        f : str
+            zonebudget output file name
+        names : list of strings
+            A list of strings containing the names of the records desired.
+        zones : list of ints or strings
+            A list of integer zone numbers or zone names desired.
+        net : boolean
+            If True, returns net IN-OUT for each record.
+        pivot : bool
+            If True, pivots the record array into a more user-friendly
+            format for working with the data
+
+        Returns
+        -------
+        np.recarray
+        """
+        aliases = None
+        if self._zon is not None:
+            aliases = self._zon.aliases
+
+        # read output from disk unless a cached recarray can be reused
+        if f is None and self._recarray is None:
+            f = os.path.join(self._model_ws, f"{self._name}.csv")
+        if f is not None:
+            self._recarray = _read_zb_csv2(
+                f, add_prefix=False, aliases=aliases
+            )
+
+        recarray = _get_budget(
+            self._recarray,
+            self._zon._zonenamedict,
+            names=names,
+            zones=zones,
+            net=net,
+        )
+
+        if pivot:
+            recarray = _pivot_recarray(recarray)
+
+        return recarray
+
+    def get_volumetric_budget(
+        self, modeltime, recarray=None, extrapolate_kper=False
+    ):
+        """
+        Method to generate a volumetric budget table based on flux information
+
+        Parameters
+        ----------
+        modeltime : flopy.discretization.ModelTime object
+            ModelTime object for calculating volumes
+        recarray : np.recarray
+            optional; a user-supplied recarray for calculating the
+            volumetric budget. The recarray must be pivoted before being
+            passed to get_volumetric_budget
+        extrapolate_kper : bool
+            flag to determine if we fill in data gaps with other
+            timestep information from the same stress period. 
+ if True, we assume that flux is constant throughout a stress period + and the pandas dataframe returned contains a + volumetric budget per stress period + + if False, calculates volumes from available flux data + + Returns + ------- + pd.DataFrame + + """ + if recarray is None: + recarray = self.get_budget(pivot=True) + return _volumetric_flux(recarray, modeltime, extrapolate_kper) + + def write_input(self, line_length=20): + """ + Method to write a ZoneBudget 6 model to file + + Parameters + ---------- + line_length : int + length of line for izone array + + """ + nam = [] + for pkg_nam, pkg in self.package_dict.items(): + if pkg_nam in ("grb", "bud"): + path = os.path.relpath(pkg.filename, self._model_ws) + else: + path = pkg.filename + pkg.write_input(line_length=line_length) + nam.append(f" {pkg_nam.upper()} {path}\n") + + path = os.path.join(self._model_ws, self._name + self._extension) + with open(path, "w") as foo: + foo.write("BEGIN ZONEBUDGET\n") + foo.writelines(nam) + foo.write("END ZONEBUDGET\n") + + @staticmethod + def load(nam_file, model_ws="."): + """ + Method to load a zonebudget model from namefile + + Parameters + ---------- + nam_file : str + zonebudget name file + model_ws : str + model workspace path + + Returns + ------- + ZoneBudget6 object + """ + from ..utils.flopy_io import multi_line_strip + + name = nam_file.split(".")[0] + zb6 = ZoneBudget6(name=name, model_ws=model_ws) + with open(os.path.join(model_ws, nam_file)) as foo: + line = multi_line_strip(foo) + if "begin" in line: + while True: + t = multi_line_strip(foo).split() + if t[0] == "end": + break + else: + zb6.add_package(t[0], t[1]) + + return zb6 + + def export(self, f, ml, **kwargs): + """ + Method to export a netcdf file, or add zonebudget output to + an open netcdf file instance + + Parameters + ---------- + f : str or flopy.export.netcdf.NetCdf object + ml : flopy.modflow.Modflow or flopy.mf6.ModflowGwf object + **kwargs : + logger : flopy.export.netcdf.Logger instance + masked_vals : list + list of values to mask + + Returns + ------- + flopy.export.netcdf.NetCdf object + + """ + from ..export.utils import output_helper + + if isinstance(f, str): + if not f.endswith(".nc"): + raise AssertionError( + "File extension must end with .nc to " + "export a netcdf file" + ) + + zbncfobj = dataframe_to_netcdf_fmt( + self.get_dataframes(pivot=True), self._zon.izone, flux=True + ) + oudic = {"zbud": zbncfobj} + return output_helper(f, ml, oudic, **kwargs) + + +class ZoneFile6: + """ + Class to build, read, write and edit MODFLOW 6 zonebudget zone files + + Parameters + ---------- + model : ZoneBudget6 object + model object + izone : np.array + numpy array of zone numbers + extension : str + zone file extension name, defaults to ".zon" + aliases : dict + optional dictionary of zone aliases. ex. {1 : "nw_model"} + """ + + def __init__(self, model, izone, extension=".zon", aliases=None): + self.izone = izone + + if not extension.startswith("."): + extension = "." 
+ extension + + self._extension = extension + self._parent = model + self._parent.add_package("zon", self) + self.filename = self._parent.name + extension + self.aliases = aliases + self.allzones = [int(z) for z in np.unique(izone) if z != 0] + self._zonenamedict = {z: f"ZONE_{z}" for z in self.allzones} + + if aliases is not None: + if not isinstance(aliases, dict): + raise TypeError("aliases parameter must be a dictionary") + + pop_list = [] + for zn, alias in aliases.items(): + if zn in self._zonenamedict: + self._zonenamedict[zn] = "_".join(alias.split()) + self.aliases[zn] = "_".join(alias.split()) + else: + pop_list.append(zn) + print(f"warning: zone number {zn} not found") + + for p in pop_list: + aliases.pop(p) + + @property + def ncells(self): + """ + Method to get number of model cells + + """ + return self.izone.size + + def write_input(self, f=None, line_length=20): + """ + Method to write the zonebudget 6 file + + Parameters + ---------- + f : str + zone file name + line_length : int + maximum length of line to write in izone array + """ + if f is None: + f = os.path.join(self._parent.model_ws, self.filename) + + with open(f, "w") as foo: + bfmt = [" {:d}"] + foo.write( + f"BEGIN DIMENSIONS\n NCELLS {self.ncells}\n" + "END DIMENSIONS\n\n" + ) + + foo.write("BEGIN GRIDDATA\n IZONE\n") + foo.write(" INTERNAL FACTOR 1 IPRN 0\n") + izone = np.ravel(self.izone) + i0 = 0 + i1 = line_length + while i1 < self.izone.size: + fmt = "".join(bfmt * line_length) + foo.write(fmt.format(*izone[i0:i1])) + foo.write("\n") + i0 = i1 + i1 += line_length + i1 = self.izone.size - i0 + fmt = "".join(bfmt * i1) + foo.write(fmt.format(*izone[i0:])) + foo.write("\nEND GRIDDATA\n") + + @staticmethod + def load(f, model): + """ + Method to load a Zone file for zonebudget 6. 
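+        Supports CONSTANT, INTERNAL, and OPEN/CLOSE control records for
+        the IZONE array.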
+
+        Parameters
+        ----------
+        f : str
+            zone file name
+        model : ZoneBudget6 object
+            zonebudget 6 model object
+
+        Returns
+        -------
+        ZoneFile6 object
+
+        """
+        from ..utils.flopy_io import multi_line_strip
+
+        pkg_ws = os.path.split(f)[0]
+        with open(f) as foo:
+            t = [0]
+            while t[0] != "ncells":
+                t = multi_line_strip(foo).split()
+
+            ncells = int(t[1])
+
+            t = [0]
+            while t[0] != "izone":
+                t = multi_line_strip(foo).split()
+
+            method = multi_line_strip(foo).split()[0]
+
+            if method in ("internal", "open/close"):
+                izone = np.zeros((ncells,), dtype=int)
+                i = 0
+                fobj = foo
+                if method == "open/close":
+                    fobj = open(os.path.join(pkg_ws, t[1]))
+                while i < ncells:
+                    t = multi_line_strip(fobj).split()
+                    if t[0] == "open/close":
+                        if fobj != foo:
+                            fobj.close()
+                        fobj = open(os.path.join(pkg_ws, t[1]))
+                        continue
+                    for zn in t:
+                        izone[i] = zn
+                        i += 1
+            else:
+                izone = np.array([t[1]] * ncells, dtype=int)
+
+        zon = ZoneFile6(model, izone)
+        return zon
+
+
+def _numpyvoid2numeric(a):
+    # The budget record array has multiple dtypes and a slice returns
+    # the flexible-type numpy.void which must be converted to a numeric
+    # type prior to performing reducing functions such as sum() or
+    # mean()
+    return np.array([list(r) for r in a])
+
+
+def sum_flux_tuples(fromzones, tozones, fluxes):
+    tup = zip(fromzones, tozones, fluxes)
+    sorted_tups = sort_tuple(tup)
+
+    # Group the sorted tuples by (from zone, to zone)
+    # itertools.groupby() returns the index (from zone, to zone) and
+    # a list of the tuples with that index
+    from_zones = []
+    to_zones = []
+    fluxes = []
+    for (fz, tz), ftup in groupby(sorted_tups, lambda tup: tup[:2]):
+        f = np.sum([tup[-1] for tup in list(ftup)])
+        from_zones.append(fz)
+        to_zones.append(tz)
+        fluxes.append(f)
+    return np.array(from_zones), np.array(to_zones), np.array(fluxes)
+
+
+def sort_tuple(tup, n=2):
+    """Sort a tuple by the first n values
+
+    Parameters
+    ----------
+    tup : tuple
+        input tuple
+    n : int
+        number of leading values to sort by (default is 2)
+
+    Returns
+    -------
+    tup : tuple
+        tuple sorted by the first n values
+
+    """
+    return tuple(sorted(tup, key=lambda t: t[:n]))
+
+
+def _recarray_to_dataframe(
+    recarray,
+    zonenamedict,
+    start_datetime=None,
+    timeunit="D",
+    index_key="totim",
+    zones=None,
+    pivot=False,
+):
+    """
+    Method to convert zonebudget recarrays to pandas dataframes
+
+    Parameters
+    ----------
+    recarray : np.recarray
+        budget recarray
+    zonenamedict : dict
+        dictionary of zone names
+    start_datetime : str
+        datetime string indicating the time the simulation starts
+    timeunit : str
+        string indicating the time unit used in the model
+    index_key : str
+        field to use (in addition to "name") in the DataFrame multi-index
+    zones : list of ints or strings
+        list of integer zone numbers or zone names to include
+    pivot : bool
+        if True, return the pivoted records without reindexing
+
+    Returns
+    -------
+    pd.DataFrame
+    """
+    pd = import_optional_dependency(
+        "pandas",
+        error_message="ZoneBudget.get_dataframes() requires pandas.",
+    )
+
+    valid_index_keys = ["totim", "kstpkper"]
+    s = f'index_key "{index_key}" is not valid.'
+    assert index_key in valid_index_keys, s
+
+    valid_timeunit = ["S", "M", "H", "D", "Y"]
+
+    if timeunit.upper() == "SECONDS":
+        timeunit = "S"
+    elif timeunit.upper() == "MINUTES":
+        timeunit = "M"
+    elif timeunit.upper() == "HOURS":
+        timeunit = "H"
+    elif timeunit.upper() == "DAYS":
+        timeunit = "D"
+    elif timeunit.upper() == "YEARS":
+        timeunit = "Y"
+
+    errmsg = (
+        f"Specified time units ({timeunit}) not recognized. Please use one of "
+    )
+    assert timeunit in valid_timeunit, errmsg + ", ".join(valid_timeunit) + "." 
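+    # build the DataFrame; when start_datetime is supplied, totim is
+    # converted below to absolute datetimes using the normalized timeunit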
+ + df = pd.DataFrame().from_records(recarray) + if start_datetime is not None and "totim" in list(df): + totim = totim_to_datetime( + df.totim, + start=pd.to_datetime(start_datetime), + timeunit=timeunit, + ) + df["datetime"] = totim + if pivot: + return pd.DataFrame.from_records(recarray) + + index_cols = ["datetime", "name"] + else: + if pivot: + return pd.DataFrame.from_records(recarray) + + if index_key == "totim" and "totim" in list(df): + index_cols = ["totim", "name"] + else: + index_cols = ["time_step", "stress_period", "name"] + + df = df.set_index(index_cols) # .sort_index(level=0) + if zones is not None: + keep_cols = zones + else: + keep_cols = zonenamedict.values() + return df.loc[:, keep_cols] + + +def _get_budget(recarray, zonenamedict, names=None, zones=None, net=False): + """ + Get a list of zonebudget record arrays. + + Parameters + ---------- + recarray : np.recarray + budget recarray + zonenamedict : dict + dictionary of zone names + names : list of strings + A list of strings containing the names of the records desired. + zones : list of ints or strings + A list of integer zone numbers or zone names desired. + net : boolean + If True, returns net IN-OUT for each record. + + Returns + ------- + budget_list : list of record arrays + A list of the zonebudget record arrays. + + """ + if isinstance(names, str): + names = [names] + if isinstance(zones, str): + zones = [zones] + elif isinstance(zones, int): + zones = [zones] + standard_fields = ["time_step", "stress_period", "name"] + if "totim" in recarray.dtype.names: + standard_fields.insert(0, "totim") + select_fields = standard_fields + list(zonenamedict.values()) + select_records = np.where((recarray["name"] == recarray["name"])) + if zones is not None: + for idx, z in enumerate(zones): + if isinstance(z, int): + zones[idx] = zonenamedict[z] + select_fields = standard_fields + zones + + if names is not None: + names = _clean_budget_names(recarray, names) + select_records = np.in1d(recarray["name"], names) + if net: + if names is None: + names = _clean_budget_names(recarray, _get_record_names(recarray)) + net_budget = _compute_net_budget(recarray, zonenamedict) + seen = [] + net_names = [] + for name in names: + if name.endswith("_IN") or name.endswith("_OUT"): + iname = "_".join(name.split("_")[:-1]) + else: + iname = "_".join(name.split("_")[1:]) + if iname not in seen: + seen.append(iname) + else: + net_names.append(iname) + select_records = np.in1d(net_budget["name"], net_names) + return net_budget[select_fields][select_records] + else: + return recarray[select_fields][select_records] + + +def _clean_budget_names(recarray, names): + """ + Method to clean budget names + + Parameters + ---------- + recarray : np.recarray + + names : list + list of names in recarray + + Returns + ------- + list + """ + newnames = [] + mbnames = ["TOTAL_IN", "TOTAL_OUT", "IN-OUT", "PERCENT_DISCREPANCY"] + for name in names: + if name in mbnames: + newnames.append(name) + elif ( + not name.startswith("FROM_") + and not name.startswith("TO_") + and not name.endswith("_IN") + and not name.endswith("_OUT") + ): + newname_in = "FROM_" + name.upper() + newname_out = "TO_" + name.upper() + if newname_in in recarray["name"]: + newnames.append(newname_in) + if newname_out in recarray["name"]: + newnames.append(newname_out) + else: + if name in recarray["name"]: + newnames.append(name) + return newnames + + +def _get_record_names(recarray, stripped=False): + """ + Get a list of water budget record names in the file. 
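+
+    Parameters
+    ----------
+    recarray : np.recarray
+        budget recarray
+    stripped : bool
+        if True, strip the "FROM_"/"TO_" prefixes and "_IN"/"_OUT"
+        suffixes from the names and return the unique base names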
+
+    Returns
+    -------
+    out : list of strings
+        List of unique record names in the recarray.
+
+    """
+    rec_names = np.unique(recarray["name"])
+    if not stripped:
+        return rec_names
+    else:
+        seen = []
+        for recname in rec_names:
+            if recname in ["IN-OUT", "TOTAL_IN", "TOTAL_OUT", "IN_OUT"]:
+                continue
+            if recname.endswith("_IN"):
+                recname = recname[:-3]
+            elif recname.endswith("_OUT"):
+                recname = recname[:-4]
+            if recname not in seen:
+                seen.append(recname)
+        seen.extend(["IN-OUT", "TOTAL", "IN_OUT"])
+    return np.array(seen)
+
+
+def _compute_net_budget(recarray, zonenamedict):
+    """
+    Compute net (IN minus OUT) fluxes for each record
+
+    Parameters
+    ----------
+    recarray : np.recarray
+        budget recarray
+    zonenamedict : dict
+        dictionary of zone names
+
+    Returns
+    -------
+    np.recarray
+        record array of net fluxes, with the "FROM_"/"TO_" and
+        "_IN"/"_OUT" affixes stripped from the record names
+    """
+    recnames = _get_record_names(recarray)
+    innames = [
+        n for n in recnames if n.startswith("FROM_") or n.endswith("_IN")
+    ]
+    outnames = [
+        n for n in recnames if n.startswith("TO_") or n.endswith("_OUT")
+    ]
+    select_fields = ["totim", "time_step", "stress_period", "name"] + list(
+        zonenamedict.values()
+    )
+    if "totim" not in recarray.dtype.names:
+        select_fields.pop(0)
+
+    select_records_in = np.in1d(recarray["name"], innames)
+    select_records_out = np.in1d(recarray["name"], outnames)
+    in_budget = recarray[select_fields][select_records_in]
+    out_budget = recarray[select_fields][select_records_out]
+    net_budget = in_budget.copy()
+    for f in [n for n in zonenamedict.values() if n in select_fields]:
+        net_budget[f] = np.array([r for r in in_budget[f]]) - np.array(
+            [r for r in out_budget[f]]
+        )
+    newnames = []
+    for n in net_budget["name"]:
+        if n.endswith("_IN") or n.endswith("_OUT"):
+            newnames.append("_".join(n.split("_")[:-1]))
+        else:
+            newnames.append("_".join(n.split("_")[1:]))
+    net_budget["name"] = newnames
+    return net_budget
+
+
+def _read_zb_zblst(fname):
+    """Method to read zonebudget zblst output
+
+    Parameters
+    ----------
+    fname : str
+        zonebudget output file name
+
+    Returns
+    -------
+    np.recarray
+    """
+    with open(fname) as foo:
+
+        data = {}
+        read_data = False
+        flow_budget = False
+        empty = 0
+        prefix = ""
+        while True:
+            line = foo.readline().strip().upper()
+            t = line.split()
+            if t:
+                if t[-1].strip() == "ZONES.":
+                    line = foo.readline().strip()
+                    zones = [int(i) for i in line.split()]
+                    for zone in zones:
+                        data[f"TO_ZONE_{zone}"] = []
+                        data[f"FROM_ZONE_{zone}"] = []
+
+            if "FLOW BUDGET FOR ZONE" in line:
+                flow_budget = True
+                read_data = False
+                zlist = []
+                empty = 0
+                t = line.split()
+                zone = int(t[4])
+                if len(t[7]) > 4:
+                    t.insert(8, t[7][4:])
+                kstp = int(t[8]) - 1
+                if len(t[11]) > 6:
+                    t.append(t[11][6:])
+                kper = int(t[12]) - 1
+                if "ZONE" not in data:
+                    data["ZONE"] = [zone]
+                    data["KSTP"] = [kstp]
+                    data["KPER"] = [kper]
+                else:
+                    data["ZONE"].append(zone)
+                    data["KSTP"].append(kstp)
+                    data["KPER"].append(kper)
+
+            elif line in ("", " "):
+                empty += 1
+
+            elif read_data:
+                if "=" in line:
+                    t = line.split("=")
+                    label = t[0].strip()
+                    if "ZONE" in line:
+                        if prefix == "FROM_":
+                            zlist.append(int(label.split()[1]))
+                            label = f"FROM_ZONE_{label.split()[1]}"
+                        else:
+                            label = f"TO_ZONE_{label.split()[-1]}"
+
+                    elif "TOTAL" in line or "PERCENT DISCREPANCY" in line:
+                        label = "_".join(label.split())
+
+                    elif "IN - OUT" in line:
+                        label = "IN-OUT"
+
+                    else:
+                        label = prefix + "_".join(label.split())
+
+                    if label in data:
+                        data[label].append(float(t[1]))
+                    else:
+                        data[label] = [float(t[1])]
+
+                    if label == "PERCENT_DISCREPANCY":
+                        # fill in non-connected zones with zeros... 
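+                        # (zone-to-zone records are only printed for
+                        # connected zones, so pad the missing columns to
+                        # keep every record list the same length)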
+ for zone in zones: + if zone in zlist: + continue + data[f"FROM_ZONE_{zone}"].append(0) + data[f"TO_ZONE_{zone}"].append(0) + + elif "OUT:" in line: + prefix = "TO_" + + else: + pass + + elif flow_budget: + if "IN:" in line: + prefix = "FROM_" + read_data = True + flow_budget = False + + else: + pass + + if empty >= 30: + break + + return _zb_dict_to_recarray(data) + + +def _read_zb_csv(fname): + """Method to read zonebudget csv output + + Parameters + ---------- + fname : str + zonebudget output file name + + Returns + ------- + np.recarray + """ + with open(fname) as foo: + data = {} + zone_header = False + read_data = False + empty = 0 + while True: + line = foo.readline().strip().upper() + + if "TIME STEP" in line: + t = line.split(",") + kstp = int(t[1]) - 1 + kper = int(t[3]) - 1 + totim = float(t[5]) + if "KSTP" not in data: + data["KSTP"] = [] + data["KPER"] = [] + data["TOTIM"] = [] + data["ZONE"] = [] + + zone_header = True + empty = 0 + + elif zone_header: + t = line.split(",") + zones = [int(i.split()[-1]) for i in t[1:] if i not in ("",)] + + for zone in zones: + data["KSTP"].append(kstp) + data["KPER"].append(kper) + data["ZONE"].append(zone) + data["TOTIM"].append(totim) + + zone_header = False + read_data = True + + elif read_data: + + t = line.split(",") + if "IN" in t[1]: + prefix = "FROM_" + + elif "OUT" in t[1]: + prefix = "TO_" + + else: + if "ZONE" in t[0] or "TOTAL" in t[0] or "IN-OUT" in t[0]: + label = "_".join(t[0].split()) + elif "PERCENT ERROR" in line: + label = "_".join(t[0].split()) + read_data = False + else: + label = prefix + "_".join(t[0].split()) + + if label not in data: + data[label] = [] + + for val in t[1:]: + if val in ("",): + continue + + data[label].append(float(val)) + + elif line in ("", " "): + empty += 1 + + else: + pass + + if empty >= 25: + break + + return _zb_dict_to_recarray(data) + + +def _read_zb_csv2(fname, add_prefix=True, aliases=None): + """ + Method to read CSV2 output from zonebudget and CSV output + from Zonebudget6 + + Parameters + ---------- + fname : str + zonebudget output file name + add_prefix : bool + boolean flag to add "TO_", "FROM_" prefixes to column headings + Returns + ------- + np.recarray + """ + with open(fname) as foo: + # read the header and create the dtype + h = foo.readline().upper().strip().split(",") + h = [i.strip() for i in h if i] + dtype = [] + prefix = "FROM_" + for col in h: + col = col.replace("-", "_") + if not add_prefix: + prefix = "" + if col in ("TOTIM", "PERIOD", "STEP", "KSTP", "KPER", "ZONE"): + if col in ("ZONE", "STEP", "KPER", "KSTP", "PERIOD"): + if col == "STEP": + col = "KSTP" + elif col == "PERIOD": + col = "KPER" + dtype.append((col, int)) + + else: + dtype.append((col, float)) + + elif col == "TOTAL IN": + dtype.append(("_".join(col.split()), float)) + prefix = "TO_" + elif col == "TOTAL OUT": + dtype.append(("_".join(col.split()), float)) + prefix = "" + elif col in ("FROM OTHER ZONES", "TO OTHER ZONES"): + dtype.append(("_".join(col.split()), float)) + elif col == "IN_OUT": + dtype.append(("IN-OUT", float)) + else: + dtype.append((prefix + "_".join(col.split()), float)) + + array = np.genfromtxt(foo, delimiter=",").T + if len(array) != len(dtype): + array = array[:-1] + array.shape = (len(dtype), -1) + data = {name[0]: list(array[ix]) for ix, name in enumerate(dtype)} + data["KPER"] = list(np.array(data["KPER"]) - 1) + data["KSTP"] = list(np.array(data["KSTP"]) - 1) + return _zb_dict_to_recarray(data, aliases=aliases) + + +def _zb_dict_to_recarray(data, aliases=None): + """ + 
Method to check the zonebudget dictionary and convert it to a + numpy recarray. + + Parameters + ---------- + data : dict + dictionary of zonebudget data from CSV 1 or ZBLST files + + Returns + ------- + np.recarray + """ + # if steady state is used, storage will not be written + if "FROM_STORAGE" in data: + if len(data["FROM_STORAGE"]) < len(data["ZONE"]): + adj = len(data["ZONE"]) - len(data["FROM_STORAGE"]) + adj = [0] * adj + data["FROM_STORAGE"] = adj + data["FROM_STORAGE"] + data["TO_STORAGE"] = adj + data["TO_STORAGE"] + + zones = list(np.unique(data["ZONE"])) + zone_dtypes = [] + for zn in zones: + if aliases is not None: + if zn in aliases: + zone_dtypes.append((aliases[zn], float)) + else: + zone_dtypes.append((f"ZONE_{int(zn)}", float)) + else: + zone_dtypes.append((f"ZONE_{int(zn)}", float)) + + dtype = [ + ("totim", float), + ("time_step", int), + ("stress_period", int), + ("name", object), + ] + zone_dtypes + + if "TOTIM" not in data: + dtype.pop(0) + + array = [] + allzones = data["ZONE"] + for strt in range(0, len(data["ZONE"]), len(zones)): + end = strt + len(zones) + kstp = data["KSTP"][strt] + kper = data["KPER"][strt] + totim = None + if "TOTIM" in data: + totim = data["TOTIM"][strt] + + for name, values in data.items(): + if name in ("KSTP", "KPER", "TOTIM", "ZONE"): + continue + rec = [kstp, kper, name] + if totim is not None: + rec = [totim] + rec + tmp = values[strt:end] + tzones = allzones[strt:end] + # check zone numbering matches header numbering, if not re-order + if tzones != zones: + idx = [zones.index(z) for z in tzones] + tmp = [tmp[i] for i in idx] + + array.append(tuple(rec + tmp)) + + array = np.array(array, dtype=dtype) + return array.view(type=np.recarray) + + +def _pivot_recarray(recarray): + """ + Method to pivot the zb output recarray to be compatible + with the ZoneBudgetOutput method until the class is deprecated + + Returns + ------- + + """ + dtype = [("totim", float), ("kper", int), ("kstp", int), ("zone", int)] + record_names = np.unique(recarray["name"]) + for rec_name in record_names: + dtype.append((rec_name, float)) + + rnames = recarray.dtype.names + zones = {i: int(i.split("_")[-1]) for i in rnames if i.startswith("ZONE")} + + kstp_kper = np.vstack( + sorted({(rec["time_step"], rec["stress_period"]) for rec in recarray}) + ) + pvt_rec = np.recarray((1,), dtype=dtype) + n = 0 + for kstp, kper in kstp_kper: + idxs = np.where( + (recarray["time_step"] == kstp) + & (recarray["stress_period"] == kper) + ) + if len(idxs) == 0: + pass + else: + temp = recarray[idxs] + for zonename, zone in zones.items(): + if n != 0: + pvt_rec.resize((len(pvt_rec) + 1,), refcheck=False) + pvt_rec["kstp"][-1] = kstp + pvt_rec["kper"][-1] = kper + pvt_rec["zone"][-1] = zone + for rec in temp: + pvt_rec[rec["name"]][-1] = rec[zonename] + + if "totim" in rnames: + pvt_rec["totim"][-1] = temp["totim"][-1] + else: + pvt_rec["totim"][-1] = 0 + + n += 1 + return pvt_rec + + +def _volumetric_flux(recarray, modeltime, extrapolate_kper=False): + """ + Method to generate a volumetric budget table based on flux information + + Parameters + ---------- + recarray : np.recarray + pivoted numpy recarray of zonebudget fluxes + modeltime : flopy.discretization.ModelTime object + flopy modeltime object + extrapolate_kper : bool + flag to determine if we fill in data gaps with other + timestep information from the same stress period. 
+ if True, we assume that flux is constant throughout a stress period + and the pandas dataframe returned contains a + volumetric budget per stress period + + if False, calculates volumes from available flux data + + Returns + ------- + pd.DataFrame + + """ + pd = import_optional_dependency( + "pandas", + error_message="ZoneBudget._volumetric_flux() requires pandas.", + ) + + nper = len(modeltime.nstp) + volumetric_data = {} + zones = np.unique(recarray["zone"]) + + for key in recarray.dtype.names: + volumetric_data[key] = [] + + if extrapolate_kper: + volumetric_data.pop("kstp") + perlen = modeltime.perlen + totim = np.add.accumulate(perlen) + for per in range(nper): + idx = np.where(recarray["kper"] == per)[0] + + if len(idx) == 0: + continue + + temp = recarray[idx] + + for zone in zones: + if zone == 0: + continue + + zix = np.where(temp["zone"] == zone)[0] + + if len(zix) == 0: + raise Exception + + for key in recarray.dtype.names: + if key == "totim": + volumetric_data[key].append(totim[per]) + + elif key == "tslen": + volumetric_data["perlen"].append(perlen[per]) + + elif key == "kstp": + continue + + elif key == "kper": + volumetric_data[key].append(per) + + elif key == "zone": + volumetric_data[key].append(zone) + + else: + tmp = np.nanmean(temp[zix][key]) + vol = tmp * perlen[per] + volumetric_data[key].append(vol) + + else: + n = 0 + tslen = {} + dtotim = {} + totim = modeltime.totim + for ix, nstp in enumerate(modeltime.nstp): + for stp in range(nstp): + idx = np.where( + (recarray["kper"] == ix) & (recarray["kstp"] == stp) + ) + if len(idx[0]) == 0: + continue + elif n == 0: + tslen[(stp, ix)] = totim[n] + else: + tslen[(stp, ix)] = totim[n] - totim[n - 1] + dtotim[(stp, ix)] = totim[n] + n += 1 + + ltslen = [tslen[(rec["kstp"], rec["kper"])] for rec in recarray] + if len(np.unique(recarray["totim"])) == 1: + ltotim = [dtotim[(rec["kstp"], rec["kper"])] for rec in recarray] + recarray["totim"] = ltotim + + for name in recarray.dtype.names: + if name in ("zone", "kstp", "kper", "tslen", "totim"): + volumetric_data[name] = recarray[name] + else: + volumetric_data[name] = recarray[name] * ltslen + + return pd.DataFrame.from_dict(volumetric_data) + + +def dataframe_to_netcdf_fmt(df, zone_array, flux=True): + """ + Method to transform a volumetric zonebudget dataframe into + array format for netcdf. + + time is on axis 0 + zone is on axis 1 + + Parameters + ---------- + df : pd.DataFrame + zone_array : np.ndarray + zonebudget zones array + flux : bool + boolean flag to indicate if budget data is a flux "L^3/T" (True, + default) or if the data have been processed to + volumetric values "L^3" (False) + + Returns + ------- + ZBNetOutput object + + """ + zones = np.sort(np.unique(df.zone.values)) + totim = np.sort(np.unique(df.totim.values)) + + data = {} + for col in df.columns: + if col in ("totim", "zone", "kper", "kstp", "perlen"): + pass + else: + data[col] = np.zeros((totim.size, zones.size), dtype=float) + + for i, time in enumerate(totim): + tdf = df.loc[ + df.totim.isin( + [ + time, + ] + ) + ] + tdf = tdf.sort_values(by=["zone"]) + + for col in df.columns: + if col in ("totim", "zone", "kper", "kstp", "perlen"): + pass + else: + data[col][i, :] = tdf[col].values + + return ZBNetOutput(zones, totim, data, zone_array, flux=flux) + + +class ZBNetOutput: + """ + Class that holds zonebudget netcdf output and allows export utilities + to recognize the output data type. 
+
+    Parameters
+    ----------
+    zones : np.ndarray
+        array of zone numbers
+    time : np.ndarray
+        array of totim
+    arrays : dict
+        dictionary of budget term arrays.
+        axis 0 is totim,
+        axis 1 is zones
+    zone_array : np.ndarray
+        array of zonebudget zone numbers for the model grid
+    flux : bool
+        boolean flag to indicate if budget data is a flux "L^3/T" (True,
+        default) or if the data have been processed to
+        volumetric values "L^3" (False)
+    """
+
+    def __init__(self, zones, time, arrays, zone_array, flux=True):
+        self.zones = zones
+        self.time = time
+        self.arrays = arrays
+        self.zone_array = zone_array
+        self.flux = flux
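
A minimal usage sketch of the ZoneBudget6 workflow above (the name file,
workspace, and availability of a zbud6 executable are assumptions for
illustration):

    from flopy.utils.zonbud import ZoneBudget6

    zb6 = ZoneBudget6.load("zonebud.zbnam", model_ws="model_dir")
    zb6.run_model()  # zbud6 writes a csv that get_budget() reads back
    rec = zb6.get_budget(net=True, pivot=True)
    df = zb6.get_dataframes(pivot=True)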